blob: adc42d7ff4f89c0b84a3dacae80a33dc90897b61 [file] [log] [blame]
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
Eddie Wai9152e0b2017-06-14 03:26:23 -070064static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
Selvin Xavier58d4a672017-06-29 12:28:12 -0700148 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800150
151 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
152 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
153 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
154 ib_attr->max_qp = dev_attr->max_qp;
155 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
156 ib_attr->device_cap_flags =
157 IB_DEVICE_CURR_QP_STATE_MOD
158 | IB_DEVICE_RC_RNR_NAK_GEN
159 | IB_DEVICE_SHUTDOWN_PORT
160 | IB_DEVICE_SYS_IMAGE_GUID
161 | IB_DEVICE_LOCAL_DMA_LKEY
162 | IB_DEVICE_RESIZE_MAX_WR
163 | IB_DEVICE_PORT_ACTIVE_EVENT
164 | IB_DEVICE_N_NOTIFY_CQ
165 | IB_DEVICE_MEM_WINDOW
166 | IB_DEVICE_MEM_WINDOW_TYPE_2B
167 | IB_DEVICE_MEM_MGT_EXTENSIONS;
168 ib_attr->max_sge = dev_attr->max_qp_sges;
169 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
170 ib_attr->max_cq = dev_attr->max_cq;
171 ib_attr->max_cqe = dev_attr->max_cq_wqes;
172 ib_attr->max_mr = dev_attr->max_mr;
173 ib_attr->max_pd = dev_attr->max_pd;
174 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
Eddie Waia25d1122017-06-29 12:28:13 -0700175 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
Devesh Sharma254cd252017-06-29 12:28:16 -0700176 if (dev_attr->is_atomic) {
177 ib_attr->atomic_cap = IB_ATOMIC_HCA;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
179 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
Selvin Xavier86816a02017-05-22 03:15:44 -0700194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
Selvin Xavier601577b2017-06-29 12:28:19 -0700204 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800205 return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214 /* Modify the GUID requires the modification of the GID table */
215 /* GUID should be made as READ-ONLY */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800226/* Port */
227int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
228 struct ib_port_attr *port_attr)
229{
230 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
231 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
232
233 memset(port_attr, 0, sizeof(*port_attr));
234
235 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
236 port_attr->state = IB_PORT_ACTIVE;
237 port_attr->phys_state = 5;
238 } else {
239 port_attr->state = IB_PORT_DOWN;
240 port_attr->phys_state = 3;
241 }
242 port_attr->max_mtu = IB_MTU_4096;
243 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
244 port_attr->gid_tbl_len = dev_attr->max_sgid;
245 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
246 IB_PORT_DEVICE_MGMT_SUP |
247 IB_PORT_VENDOR_CLASS_SUP |
248 IB_PORT_IP_BASED_GIDS;
249
250 /* Max MSG size set to 2G for now */
251 port_attr->max_msg_sz = 0x80000000;
252 port_attr->bad_pkey_cntr = 0;
253 port_attr->qkey_viol_cntr = 0;
254 port_attr->pkey_tbl_len = dev_attr->max_pkey;
255 port_attr->lid = 0;
256 port_attr->sm_lid = 0;
257 port_attr->lmc = 0;
258 port_attr->max_vl_num = 4;
259 port_attr->sm_sl = 0;
260 port_attr->subnet_timeout = 0;
261 port_attr->init_type_reply = 0;
Somnath Kotur74828b12017-08-31 09:27:33 +0530262 port_attr->active_speed = rdev->active_speed;
263 port_attr->active_width = rdev->active_width;
264
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800265 return 0;
266}
267
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800268int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
269 struct ib_port_immutable *immutable)
270{
271 struct ib_port_attr port_attr;
272
273 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
274 return -EINVAL;
275
276 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
277 immutable->gid_tbl_len = port_attr.gid_tbl_len;
278 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
279 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
280 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
281 return 0;
282}
283
284int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
285 u16 index, u16 *pkey)
286{
287 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
288
289 /* Ignore port_num */
290
291 memset(pkey, 0, sizeof(*pkey));
292 return bnxt_qplib_get_pkey(&rdev->qplib_res,
293 &rdev->qplib_res.pkey_tbl, index, pkey);
294}
295
296int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
297 int index, union ib_gid *gid)
298{
299 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
300 int rc = 0;
301
302 /* Ignore port_num */
303 memset(gid, 0, sizeof(*gid));
304 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
305 &rdev->qplib_res.sgid_tbl, index,
306 (struct bnxt_qplib_gid *)gid);
307 return rc;
308}
309
310int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
311 unsigned int index, void **context)
312{
313 int rc = 0;
314 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
315 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
316 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
Somnath Kotur89aaca52017-08-31 09:27:35 +0530317 struct bnxt_qplib_gid *gid_to_del;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800318
319 /* Delete the entry from the hardware */
320 ctx = *context;
321 if (!ctx)
322 return -EINVAL;
323
324 if (sgid_tbl && sgid_tbl->active) {
325 if (ctx->idx >= sgid_tbl->max)
326 return -EINVAL;
Somnath Kotur89aaca52017-08-31 09:27:35 +0530327 gid_to_del = &sgid_tbl->tbl[ctx->idx];
328 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
329 * or via the ib_unregister_device path. In the former case QP1
330 * may not be destroyed yet, in which case just return as FW
331 * needs that entry to be present and will fail it's deletion.
332 * We could get invoked again after QP1 is destroyed OR get an
333 * ADD_GID call with a different GID value for the same index
334 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
335 */
336 if (ctx->idx == 0 &&
337 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
338 ctx->refcnt == 1 && rdev->qp1_sqp) {
339 dev_dbg(rdev_to_dev(rdev),
340 "Trying to delete GID0 while QP1 is alive\n");
341 return -EFAULT;
342 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800343 ctx->refcnt--;
344 if (!ctx->refcnt) {
Somnath Kotur89aaca52017-08-31 09:27:35 +0530345 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700346 if (rc) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800347 dev_err(rdev_to_dev(rdev),
348 "Failed to remove GID: %#x", rc);
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700349 } else {
350 ctx_tbl = sgid_tbl->ctx;
351 ctx_tbl[ctx->idx] = NULL;
352 kfree(ctx);
353 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800354 }
355 } else {
356 return -EINVAL;
357 }
358 return rc;
359}
360
361int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
362 unsigned int index, const union ib_gid *gid,
363 const struct ib_gid_attr *attr, void **context)
364{
365 int rc;
366 u32 tbl_idx = 0;
367 u16 vlan_id = 0xFFFF;
368 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
369 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
370 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
371
372 if ((attr->ndev) && is_vlan_dev(attr->ndev))
373 vlan_id = vlan_dev_vlan_id(attr->ndev);
374
375 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
376 rdev->qplib_res.netdev->dev_addr,
377 vlan_id, true, &tbl_idx);
378 if (rc == -EALREADY) {
379 ctx_tbl = sgid_tbl->ctx;
380 ctx_tbl[tbl_idx]->refcnt++;
381 *context = ctx_tbl[tbl_idx];
382 return 0;
383 }
384
385 if (rc < 0) {
386 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
387 return rc;
388 }
389
390 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
391 if (!ctx)
392 return -ENOMEM;
393 ctx_tbl = sgid_tbl->ctx;
394 ctx->idx = tbl_idx;
395 ctx->refcnt = 1;
396 ctx_tbl[tbl_idx] = ctx;
Sriharsha Basavapatna063fb5b2017-11-03 02:39:04 +0530397 *context = ctx;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800398
399 return rc;
400}
401
402enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
403 u8 port_num)
404{
405 return IB_LINK_LAYER_ETHERNET;
406}
407
Eddie Wai9152e0b2017-06-14 03:26:23 -0700408#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
409
410static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
411{
412 struct bnxt_re_fence_data *fence = &pd->fence;
413 struct ib_mr *ib_mr = &fence->mr->ib_mr;
414 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
415
416 memset(wqe, 0, sizeof(*wqe));
417 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
418 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
419 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
420 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
421 wqe->bind.zero_based = false;
422 wqe->bind.parent_l_key = ib_mr->lkey;
423 wqe->bind.va = (u64)(unsigned long)fence->va;
424 wqe->bind.length = fence->size;
425 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
426 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
427
428 /* Save the initial rkey in fence structure for now;
429 * wqe->bind.r_key will be set at (re)bind time.
430 */
431 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
432}
433
434static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
435{
436 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
437 qplib_qp);
438 struct ib_pd *ib_pd = qp->ib_qp.pd;
439 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
440 struct bnxt_re_fence_data *fence = &pd->fence;
441 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
442 struct bnxt_qplib_swqe wqe;
443 int rc;
444
445 memcpy(&wqe, fence_wqe, sizeof(wqe));
446 wqe.bind.r_key = fence->bind_rkey;
447 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
448
449 dev_dbg(rdev_to_dev(qp->rdev),
450 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
451 wqe.bind.r_key, qp->qplib_qp.id, pd);
452 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
453 if (rc) {
454 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
455 return rc;
456 }
457 bnxt_qplib_post_send_db(&qp->qplib_qp);
458
459 return rc;
460}
461
462static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
463{
464 struct bnxt_re_fence_data *fence = &pd->fence;
465 struct bnxt_re_dev *rdev = pd->rdev;
466 struct device *dev = &rdev->en_dev->pdev->dev;
467 struct bnxt_re_mr *mr = fence->mr;
468
469 if (fence->mw) {
470 bnxt_re_dealloc_mw(fence->mw);
471 fence->mw = NULL;
472 }
473 if (mr) {
474 if (mr->ib_mr.rkey)
475 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
476 true);
477 if (mr->ib_mr.lkey)
478 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
479 kfree(mr);
480 fence->mr = NULL;
481 }
482 if (fence->dma_addr) {
483 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
484 DMA_BIDIRECTIONAL);
485 fence->dma_addr = 0;
486 }
487}
488
489static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
490{
491 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
492 struct bnxt_re_fence_data *fence = &pd->fence;
493 struct bnxt_re_dev *rdev = pd->rdev;
494 struct device *dev = &rdev->en_dev->pdev->dev;
495 struct bnxt_re_mr *mr = NULL;
496 dma_addr_t dma_addr = 0;
497 struct ib_mw *mw;
498 u64 pbl_tbl;
499 int rc;
500
501 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
502 DMA_BIDIRECTIONAL);
503 rc = dma_mapping_error(dev, dma_addr);
504 if (rc) {
505 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
506 rc = -EIO;
507 fence->dma_addr = 0;
508 goto fail;
509 }
510 fence->dma_addr = dma_addr;
511
512 /* Allocate a MR */
513 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
514 if (!mr) {
515 rc = -ENOMEM;
516 goto fail;
517 }
518 fence->mr = mr;
519 mr->rdev = rdev;
520 mr->qplib_mr.pd = &pd->qplib_pd;
521 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
522 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
523 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
524 if (rc) {
525 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
526 goto fail;
527 }
528
529 /* Register MR */
530 mr->ib_mr.lkey = mr->qplib_mr.lkey;
531 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
532 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
533 pbl_tbl = dma_addr;
534 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
535 BNXT_RE_FENCE_PBL_SIZE, false);
536 if (rc) {
537 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
538 goto fail;
539 }
540 mr->ib_mr.rkey = mr->qplib_mr.rkey;
541
542 /* Create a fence MW only for kernel consumers */
543 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300544 if (IS_ERR(mw)) {
Eddie Wai9152e0b2017-06-14 03:26:23 -0700545 dev_err(rdev_to_dev(rdev),
546 "Failed to create fence-MW for PD: %p\n", pd);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300547 rc = PTR_ERR(mw);
Eddie Wai9152e0b2017-06-14 03:26:23 -0700548 goto fail;
549 }
550 fence->mw = mw;
551
552 bnxt_re_create_fence_wqe(pd);
553 return 0;
554
555fail:
556 bnxt_re_destroy_fence_mr(pd);
557 return rc;
558}
559
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800560/* Protection Domains */
561int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
562{
563 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
564 struct bnxt_re_dev *rdev = pd->rdev;
565 int rc;
566
Eddie Wai9152e0b2017-06-14 03:26:23 -0700567 bnxt_re_destroy_fence_mr(pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800568
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700569 if (pd->qplib_pd.id) {
570 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
571 &rdev->qplib_res.pd_tbl,
572 &pd->qplib_pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800573 if (rc)
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700574 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800575 }
576
577 kfree(pd);
578 return 0;
579}
580
581struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
582 struct ib_ucontext *ucontext,
583 struct ib_udata *udata)
584{
585 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
586 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
587 struct bnxt_re_ucontext,
588 ib_uctx);
589 struct bnxt_re_pd *pd;
590 int rc;
591
592 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
593 if (!pd)
594 return ERR_PTR(-ENOMEM);
595
596 pd->rdev = rdev;
597 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
598 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
599 rc = -ENOMEM;
600 goto fail;
601 }
602
603 if (udata) {
604 struct bnxt_re_pd_resp resp;
605
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700606 if (!ucntx->dpi.dbr) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800607 /* Allocate DPI in alloc_pd to avoid failing of
608 * ibv_devinfo and family of application when DPIs
609 * are depleted.
610 */
611 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700612 &ucntx->dpi, ucntx)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800613 rc = -ENOMEM;
614 goto dbfail;
615 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800616 }
617
618 resp.pdid = pd->qplib_pd.id;
619 /* Still allow mapping this DBR to the new user PD. */
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700620 resp.dpi = ucntx->dpi.dpi;
621 resp.dbr = (u64)ucntx->dpi.umdbr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800622
623 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
624 if (rc) {
625 dev_err(rdev_to_dev(rdev),
626 "Failed to copy user response\n");
627 goto dbfail;
628 }
629 }
630
Eddie Wai9152e0b2017-06-14 03:26:23 -0700631 if (!udata)
632 if (bnxt_re_create_fence_mr(pd))
633 dev_warn(rdev_to_dev(rdev),
634 "Failed to create Fence-MR\n");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800635 return &pd->ib_pd;
636dbfail:
637 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
638 &pd->qplib_pd);
639fail:
640 kfree(pd);
641 return ERR_PTR(rc);
642}
643
644/* Address Handles */
645int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
646{
647 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
648 struct bnxt_re_dev *rdev = ah->rdev;
649 int rc;
650
651 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
652 if (rc) {
653 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
654 return rc;
655 }
656 kfree(ah);
657 return 0;
658}
659
660struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400661 struct rdma_ah_attr *ah_attr,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800662 struct ib_udata *udata)
663{
664 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
665 struct bnxt_re_dev *rdev = pd->rdev;
666 struct bnxt_re_ah *ah;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400667 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800668 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800669 u8 nw_type;
670
671 struct ib_gid_attr sgid_attr;
672
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400673 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800674 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
675 return ERR_PTR(-EINVAL);
676 }
677 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
678 if (!ah)
679 return ERR_PTR(-ENOMEM);
680
681 ah->rdev = rdev;
682 ah->qplib_ah.pd = &pd->qplib_pd;
683
684 /* Supply the configuration for the HW */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400685 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800686 sizeof(union ib_gid));
687 /*
688 * If RoCE V2 is enabled, stack will have two entries for
689 * each GID entry. Avoiding this duplicte entry in HW. Dividing
690 * the GID index by 2 for RoCE V2
691 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400692 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
693 ah->qplib_ah.host_sgid_index = grh->sgid_index;
694 ah->qplib_ah.traffic_class = grh->traffic_class;
695 ah->qplib_ah.flow_label = grh->flow_label;
696 ah->qplib_ah.hop_limit = grh->hop_limit;
697 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800698 if (ib_pd->uobject &&
699 !rdma_is_multicast_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400700 grh->dgid.raw) &&
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800701 !rdma_link_local_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400702 grh->dgid.raw)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800703 union ib_gid sgid;
704
705 rc = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400706 grh->sgid_index, &sgid,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800707 &sgid_attr);
708 if (rc) {
709 dev_err(rdev_to_dev(rdev),
710 "Failed to query gid at index %d",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400711 grh->sgid_index);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800712 goto fail;
713 }
Leon Romanovskye32d2d72017-10-29 17:05:22 +0200714 if (sgid_attr.ndev)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800715 dev_put(sgid_attr.ndev);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800716 /* Get network header type for this GID */
717 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
718 switch (nw_type) {
719 case RDMA_NETWORK_IPV4:
720 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
721 break;
722 case RDMA_NETWORK_IPV6:
723 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
724 break;
725 default:
726 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
727 break;
728 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800729 }
730
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400731 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800732 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
733 if (rc) {
734 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
735 goto fail;
736 }
737
738 /* Write AVID to shared page. */
739 if (ib_pd->uobject) {
740 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
741 struct bnxt_re_ucontext *uctx;
742 unsigned long flag;
743 u32 *wrptr;
744
745 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
746 spin_lock_irqsave(&uctx->sh_lock, flag);
747 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
748 *wrptr = ah->qplib_ah.id;
749 wmb(); /* make sure cache is updated. */
750 spin_unlock_irqrestore(&uctx->sh_lock, flag);
751 }
752
753 return &ah->ib_ah;
754
755fail:
756 kfree(ah);
757 return ERR_PTR(rc);
758}
759
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400760int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800761{
762 return 0;
763}
764
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400765int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800766{
767 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
768
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400769 ah_attr->type = ib_ah->type;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400770 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400771 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400772 rdma_ah_set_grh(ah_attr, NULL, 0,
773 ah->qplib_ah.host_sgid_index,
774 0, ah->qplib_ah.traffic_class);
775 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
776 rdma_ah_set_port_num(ah_attr, 1);
777 rdma_ah_set_static_rate(ah_attr, 0);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800778 return 0;
779}
780
781/* Queue Pairs */
782int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
783{
784 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
785 struct bnxt_re_dev *rdev = qp->rdev;
786 int rc;
787
Selvin Xavierf218d672017-06-29 12:28:15 -0700788 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800789 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
790 if (rc) {
791 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
792 return rc;
793 }
794 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
795 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
796 &rdev->sqp_ah->qplib_ah);
797 if (rc) {
798 dev_err(rdev_to_dev(rdev),
799 "Failed to destroy HW AH for shadow QP");
800 return rc;
801 }
802
Selvin Xavierf218d672017-06-29 12:28:15 -0700803 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800804 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
805 &rdev->qp1_sqp->qplib_qp);
806 if (rc) {
807 dev_err(rdev_to_dev(rdev),
808 "Failed to destroy Shadow QP");
809 return rc;
810 }
811 mutex_lock(&rdev->qp_lock);
812 list_del(&rdev->qp1_sqp->list);
813 atomic_dec(&rdev->qp_count);
814 mutex_unlock(&rdev->qp_lock);
815
816 kfree(rdev->sqp_ah);
817 kfree(rdev->qp1_sqp);
Somnath Kotur89aaca52017-08-31 09:27:35 +0530818 rdev->qp1_sqp = NULL;
819 rdev->sqp_ah = NULL;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800820 }
821
Doug Ledford374cb862017-04-25 14:00:59 -0400822 if (!IS_ERR_OR_NULL(qp->rumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800823 ib_umem_release(qp->rumem);
Doug Ledford374cb862017-04-25 14:00:59 -0400824 if (!IS_ERR_OR_NULL(qp->sumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800825 ib_umem_release(qp->sumem);
826
827 mutex_lock(&rdev->qp_lock);
828 list_del(&qp->list);
829 atomic_dec(&rdev->qp_count);
830 mutex_unlock(&rdev->qp_lock);
831 kfree(qp);
832 return 0;
833}
834
835static u8 __from_ib_qp_type(enum ib_qp_type type)
836{
837 switch (type) {
838 case IB_QPT_GSI:
839 return CMDQ_CREATE_QP1_TYPE_GSI;
840 case IB_QPT_RC:
841 return CMDQ_CREATE_QP_TYPE_RC;
842 case IB_QPT_UD:
843 return CMDQ_CREATE_QP_TYPE_UD;
844 default:
845 return IB_QPT_MAX;
846 }
847}
848
849static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
850 struct bnxt_re_qp *qp, struct ib_udata *udata)
851{
852 struct bnxt_re_qp_req ureq;
853 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
854 struct ib_umem *umem;
855 int bytes = 0;
856 struct ib_ucontext *context = pd->ib_pd.uobject->context;
857 struct bnxt_re_ucontext *cntx = container_of(context,
858 struct bnxt_re_ucontext,
859 ib_uctx);
860 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
861 return -EFAULT;
862
863 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
864 /* Consider mapping PSN search memory only for RC QPs. */
865 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
866 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
867 bytes = PAGE_ALIGN(bytes);
868 umem = ib_umem_get(context, ureq.qpsva, bytes,
869 IB_ACCESS_LOCAL_WRITE, 1);
870 if (IS_ERR(umem))
871 return PTR_ERR(umem);
872
873 qp->sumem = umem;
874 qplib_qp->sq.sglist = umem->sg_head.sgl;
875 qplib_qp->sq.nmap = umem->nmap;
876 qplib_qp->qp_handle = ureq.qp_handle;
877
878 if (!qp->qplib_qp.srq) {
879 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
880 bytes = PAGE_ALIGN(bytes);
881 umem = ib_umem_get(context, ureq.qprva, bytes,
882 IB_ACCESS_LOCAL_WRITE, 1);
883 if (IS_ERR(umem))
884 goto rqfail;
885 qp->rumem = umem;
886 qplib_qp->rq.sglist = umem->sg_head.sgl;
887 qplib_qp->rq.nmap = umem->nmap;
888 }
889
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700890 qplib_qp->dpi = &cntx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800891 return 0;
892rqfail:
893 ib_umem_release(qp->sumem);
894 qp->sumem = NULL;
895 qplib_qp->sq.sglist = NULL;
896 qplib_qp->sq.nmap = 0;
897
898 return PTR_ERR(umem);
899}
900
901static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
902 (struct bnxt_re_pd *pd,
903 struct bnxt_qplib_res *qp1_res,
904 struct bnxt_qplib_qp *qp1_qp)
905{
906 struct bnxt_re_dev *rdev = pd->rdev;
907 struct bnxt_re_ah *ah;
908 union ib_gid sgid;
909 int rc;
910
911 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
912 if (!ah)
913 return NULL;
914
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800915 ah->rdev = rdev;
916 ah->qplib_ah.pd = &pd->qplib_pd;
917
918 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
919 if (rc)
920 goto fail;
921
922 /* supply the dgid data same as sgid */
923 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
924 sizeof(union ib_gid));
925 ah->qplib_ah.sgid_index = 0;
926
927 ah->qplib_ah.traffic_class = 0;
928 ah->qplib_ah.flow_label = 0;
929 ah->qplib_ah.hop_limit = 1;
930 ah->qplib_ah.sl = 0;
931 /* Have DMAC same as SMAC */
932 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
933
934 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
935 if (rc) {
936 dev_err(rdev_to_dev(rdev),
937 "Failed to allocate HW AH for Shadow QP");
938 goto fail;
939 }
940
941 return ah;
942
943fail:
944 kfree(ah);
945 return NULL;
946}
947
948static struct bnxt_re_qp *bnxt_re_create_shadow_qp
949 (struct bnxt_re_pd *pd,
950 struct bnxt_qplib_res *qp1_res,
951 struct bnxt_qplib_qp *qp1_qp)
952{
953 struct bnxt_re_dev *rdev = pd->rdev;
954 struct bnxt_re_qp *qp;
955 int rc;
956
957 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
958 if (!qp)
959 return NULL;
960
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800961 qp->rdev = rdev;
962
963 /* Initialize the shadow QP structure from the QP1 values */
964 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
965
966 qp->qplib_qp.pd = &pd->qplib_pd;
967 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
968 qp->qplib_qp.type = IB_QPT_UD;
969
970 qp->qplib_qp.max_inline_data = 0;
971 qp->qplib_qp.sig_type = true;
972
973 /* Shadow QP SQ depth should be same as QP1 RQ depth */
974 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
975 qp->qplib_qp.sq.max_sge = 2;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700976 /* Q full delta can be 1 since it is internal QP */
977 qp->qplib_qp.sq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800978
979 qp->qplib_qp.scq = qp1_qp->scq;
980 qp->qplib_qp.rcq = qp1_qp->rcq;
981
982 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
983 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700984 /* Q full delta can be 1 since it is internal QP */
985 qp->qplib_qp.rq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800986
987 qp->qplib_qp.mtu = qp1_qp->mtu;
988
989 qp->qplib_qp.sq_hdr_buf_size = 0;
990 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
991 qp->qplib_qp.dpi = &rdev->dpi_privileged;
992
993 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
994 if (rc)
995 goto fail;
996
997 rdev->sqp_id = qp->qplib_qp.id;
998
999 spin_lock_init(&qp->sq_lock);
1000 INIT_LIST_HEAD(&qp->list);
1001 mutex_lock(&rdev->qp_lock);
1002 list_add_tail(&qp->list, &rdev->qp_list);
1003 atomic_inc(&rdev->qp_count);
1004 mutex_unlock(&rdev->qp_lock);
1005 return qp;
1006fail:
1007 kfree(qp);
1008 return NULL;
1009}
1010
1011struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1012 struct ib_qp_init_attr *qp_init_attr,
1013 struct ib_udata *udata)
1014{
1015 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1016 struct bnxt_re_dev *rdev = pd->rdev;
1017 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1018 struct bnxt_re_qp *qp;
1019 struct bnxt_re_cq *cq;
1020 int rc, entries;
1021
1022 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1023 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1024 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1025 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1026 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1027 return ERR_PTR(-EINVAL);
1028
1029 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1030 if (!qp)
1031 return ERR_PTR(-ENOMEM);
1032
1033 qp->rdev = rdev;
1034 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1035 qp->qplib_qp.pd = &pd->qplib_pd;
1036 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1037 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1038 if (qp->qplib_qp.type == IB_QPT_MAX) {
1039 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1040 qp->qplib_qp.type);
1041 rc = -EINVAL;
1042 goto fail;
1043 }
1044 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1045 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1046 IB_SIGNAL_ALL_WR) ? true : false);
1047
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001048 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1049 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1050 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1051
1052 if (qp_init_attr->send_cq) {
1053 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1054 ib_cq);
1055 if (!cq) {
1056 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1057 rc = -EINVAL;
1058 goto fail;
1059 }
1060 qp->qplib_qp.scq = &cq->qplib_cq;
1061 }
1062
1063 if (qp_init_attr->recv_cq) {
1064 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1065 ib_cq);
1066 if (!cq) {
1067 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1068 rc = -EINVAL;
1069 goto fail;
1070 }
1071 qp->qplib_qp.rcq = &cq->qplib_cq;
1072 }
1073
1074 if (qp_init_attr->srq) {
1075 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1076 rc = -ENOTSUPP;
1077 goto fail;
1078 } else {
1079 /* Allocate 1 more than what's provided so posting max doesn't
1080 * mean empty
1081 */
1082 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1083 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1084 dev_attr->max_qp_wqes + 1);
1085
Eddie Wai9152e0b2017-06-14 03:26:23 -07001086 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1087 qp_init_attr->cap.max_recv_wr;
1088
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001089 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1090 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1091 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1092 }
1093
1094 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1095
1096 if (qp_init_attr->qp_type == IB_QPT_GSI) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001097 /* Allocate 1 more than what's provided */
1098 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1099 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1100 dev_attr->max_qp_wqes + 1);
1101 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1102 qp_init_attr->cap.max_send_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001103 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1104 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1105 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1106 qp->qplib_qp.sq.max_sge++;
1107 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1108 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1109
1110 qp->qplib_qp.rq_hdr_buf_size =
1111 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1112
1113 qp->qplib_qp.sq_hdr_buf_size =
1114 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1115 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1116 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1117 if (rc) {
1118 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1119 goto fail;
1120 }
1121 /* Create a shadow QP to handle the QP1 traffic */
1122 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1123 &qp->qplib_qp);
1124 if (!rdev->qp1_sqp) {
1125 rc = -EINVAL;
1126 dev_err(rdev_to_dev(rdev),
1127 "Failed to create Shadow QP for QP1");
1128 goto qp_destroy;
1129 }
1130 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1131 &qp->qplib_qp);
1132 if (!rdev->sqp_ah) {
1133 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1134 &rdev->qp1_sqp->qplib_qp);
1135 rc = -EINVAL;
1136 dev_err(rdev_to_dev(rdev),
1137 "Failed to create AH entry for ShadowQP");
1138 goto qp_destroy;
1139 }
1140
1141 } else {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001142 /* Allocate 128 + 1 more than what's provided */
1143 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1144 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1145 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1146 dev_attr->max_qp_wqes +
1147 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1148 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1149
1150 /*
1151 * Reserving one slot for Phantom WQE. Application can
1152 * post one extra entry in this case. But allowing this to avoid
1153 * unexpected Queue full condition
1154 */
1155
1156 qp->qplib_qp.sq.q_full_delta -= 1;
1157
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001158 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1159 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1160 if (udata) {
1161 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1162 if (rc)
1163 goto fail;
1164 } else {
1165 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1166 }
1167
1168 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1169 if (rc) {
1170 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1171 goto fail;
1172 }
1173 }
1174
1175 qp->ib_qp.qp_num = qp->qplib_qp.id;
1176 spin_lock_init(&qp->sq_lock);
Devesh Sharma018cf592017-05-22 03:15:40 -07001177 spin_lock_init(&qp->rq_lock);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001178
1179 if (udata) {
1180 struct bnxt_re_qp_resp resp;
1181
1182 resp.qpid = qp->ib_qp.qp_num;
1183 resp.rsvd = 0;
1184 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1185 if (rc) {
1186 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1187 goto qp_destroy;
1188 }
1189 }
1190 INIT_LIST_HEAD(&qp->list);
1191 mutex_lock(&rdev->qp_lock);
1192 list_add_tail(&qp->list, &rdev->qp_list);
1193 atomic_inc(&rdev->qp_count);
1194 mutex_unlock(&rdev->qp_lock);
1195
1196 return &qp->ib_qp;
1197qp_destroy:
1198 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1199fail:
1200 kfree(qp);
1201 return ERR_PTR(rc);
1202}
1203
1204static u8 __from_ib_qp_state(enum ib_qp_state state)
1205{
1206 switch (state) {
1207 case IB_QPS_RESET:
1208 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1209 case IB_QPS_INIT:
1210 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1211 case IB_QPS_RTR:
1212 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1213 case IB_QPS_RTS:
1214 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1215 case IB_QPS_SQD:
1216 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1217 case IB_QPS_SQE:
1218 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1219 case IB_QPS_ERR:
1220 default:
1221 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1222 }
1223}
1224
1225static enum ib_qp_state __to_ib_qp_state(u8 state)
1226{
1227 switch (state) {
1228 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1229 return IB_QPS_RESET;
1230 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1231 return IB_QPS_INIT;
1232 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1233 return IB_QPS_RTR;
1234 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1235 return IB_QPS_RTS;
1236 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1237 return IB_QPS_SQD;
1238 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1239 return IB_QPS_SQE;
1240 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1241 default:
1242 return IB_QPS_ERR;
1243 }
1244}
1245
1246static u32 __from_ib_mtu(enum ib_mtu mtu)
1247{
1248 switch (mtu) {
1249 case IB_MTU_256:
1250 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1251 case IB_MTU_512:
1252 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1253 case IB_MTU_1024:
1254 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1255 case IB_MTU_2048:
1256 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1257 case IB_MTU_4096:
1258 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1259 default:
1260 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1261 }
1262}
1263
1264static enum ib_mtu __to_ib_mtu(u32 mtu)
1265{
1266 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1267 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1268 return IB_MTU_256;
1269 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1270 return IB_MTU_512;
1271 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1272 return IB_MTU_1024;
1273 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1274 return IB_MTU_2048;
1275 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1276 return IB_MTU_4096;
1277 default:
1278 return IB_MTU_2048;
1279 }
1280}
1281
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001282static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1283 struct bnxt_re_qp *qp1_qp,
1284 int qp_attr_mask)
1285{
1286 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1287 int rc = 0;
1288
1289 if (qp_attr_mask & IB_QP_STATE) {
1290 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1291 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1292 }
1293 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1294 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1295 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1296 }
1297
1298 if (qp_attr_mask & IB_QP_QKEY) {
1299 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1300 /* Using a Random QKEY */
1301 qp->qplib_qp.qkey = 0x81818181;
1302 }
1303 if (qp_attr_mask & IB_QP_SQ_PSN) {
1304 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1305 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1306 }
1307
1308 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1309 if (rc)
1310 dev_err(rdev_to_dev(rdev),
1311 "Failed to modify Shadow QP for QP1");
1312 return rc;
1313}
1314
1315int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1316 int qp_attr_mask, struct ib_udata *udata)
1317{
1318 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1319 struct bnxt_re_dev *rdev = qp->rdev;
1320 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1321 enum ib_qp_state curr_qp_state, new_qp_state;
1322 int rc, entries;
1323 int status;
1324 union ib_gid sgid;
1325 struct ib_gid_attr sgid_attr;
1326 u8 nw_type;
1327
1328 qp->qplib_qp.modify_flags = 0;
1329 if (qp_attr_mask & IB_QP_STATE) {
1330 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1331 new_qp_state = qp_attr->qp_state;
1332 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1333 ib_qp->qp_type, qp_attr_mask,
1334 IB_LINK_LAYER_ETHERNET)) {
1335 dev_err(rdev_to_dev(rdev),
1336 "Invalid attribute mask: %#x specified ",
1337 qp_attr_mask);
1338 dev_err(rdev_to_dev(rdev),
1339 "for qpn: %#x type: %#x",
1340 ib_qp->qp_num, ib_qp->qp_type);
1341 dev_err(rdev_to_dev(rdev),
1342 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1343 curr_qp_state, new_qp_state);
1344 return -EINVAL;
1345 }
1346 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1347 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
Selvin Xavierf218d672017-06-29 12:28:15 -07001348
1349 if (!qp->sumem &&
1350 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1351 dev_dbg(rdev_to_dev(rdev),
1352 "Move QP = %p to flush list\n",
1353 qp);
1354 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1355 }
1356 if (!qp->sumem &&
1357 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1358 dev_dbg(rdev_to_dev(rdev),
1359 "Move QP = %p out of flush list\n",
1360 qp);
1361 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
1362 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001363 }
1364 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1365 qp->qplib_qp.modify_flags |=
1366 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1367 qp->qplib_qp.en_sqd_async_notify = true;
1368 }
1369 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1370 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1371 qp->qplib_qp.access =
1372 __from_ib_access_flags(qp_attr->qp_access_flags);
1373 /* LOCAL_WRITE access must be set to allow RC receive */
1374 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1375 }
1376 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1377 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1378 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1379 }
1380 if (qp_attr_mask & IB_QP_QKEY) {
1381 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1382 qp->qplib_qp.qkey = qp_attr->qkey;
1383 }
1384 if (qp_attr_mask & IB_QP_AV) {
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001385 const struct ib_global_route *grh =
1386 rdma_ah_read_grh(&qp_attr->ah_attr);
1387
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001388 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1389 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1390 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1391 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1392 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1393 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1394 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001395 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001396 sizeof(qp->qplib_qp.ah.dgid.data));
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001397 qp->qplib_qp.ah.flow_label = grh->flow_label;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001398 /* If RoCE V2 is enabled, stack will have two entries for
1399 * each GID entry. Avoiding this duplicte entry in HW. Dividing
1400 * the GID index by 2 for RoCE V2
1401 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001402 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1403 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1404 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1405 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1406 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001407 ether_addr_copy(qp->qplib_qp.ah.dmac,
1408 qp_attr->ah_attr.roce.dmac);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001409
1410 status = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001411 grh->sgid_index,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001412 &sgid, &sgid_attr);
1413 if (!status && sgid_attr.ndev) {
1414 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1415 ETH_ALEN);
1416 dev_put(sgid_attr.ndev);
1417 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1418 &sgid);
1419 switch (nw_type) {
1420 case RDMA_NETWORK_IPV4:
1421 qp->qplib_qp.nw_type =
1422 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1423 break;
1424 case RDMA_NETWORK_IPV6:
1425 qp->qplib_qp.nw_type =
1426 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1427 break;
1428 default:
1429 qp->qplib_qp.nw_type =
1430 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1431 break;
1432 }
1433 }
1434 }
1435
1436 if (qp_attr_mask & IB_QP_PATH_MTU) {
1437 qp->qplib_qp.modify_flags |=
1438 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1439 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
Devesh Sharmaf9b941b2017-08-31 09:27:28 +05301440 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001441 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1442 qp->qplib_qp.modify_flags |=
1443 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1444 qp->qplib_qp.path_mtu =
1445 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
Devesh Sharmaf9b941b2017-08-31 09:27:28 +05301446 qp->qplib_qp.mtu =
1447 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001448 }
1449
1450 if (qp_attr_mask & IB_QP_TIMEOUT) {
1451 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1452 qp->qplib_qp.timeout = qp_attr->timeout;
1453 }
1454 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1455 qp->qplib_qp.modify_flags |=
1456 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1457 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1458 }
1459 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1460 qp->qplib_qp.modify_flags |=
1461 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1462 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1463 }
1464 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1465 qp->qplib_qp.modify_flags |=
1466 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1467 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1468 }
1469 if (qp_attr_mask & IB_QP_RQ_PSN) {
1470 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1471 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1472 }
1473 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1474 qp->qplib_qp.modify_flags |=
1475 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
Eddie Waia25d1122017-06-29 12:28:13 -07001476 /* Cap the max_rd_atomic to device max */
1477 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1478 dev_attr->max_qp_rd_atom);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001479 }
1480 if (qp_attr_mask & IB_QP_SQ_PSN) {
1481 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1482 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1483 }
1484 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
Eddie Waia25d1122017-06-29 12:28:13 -07001485 if (qp_attr->max_dest_rd_atomic >
1486 dev_attr->max_qp_init_rd_atom) {
1487 dev_err(rdev_to_dev(rdev),
1488 "max_dest_rd_atomic requested%d is > dev_max%d",
1489 qp_attr->max_dest_rd_atomic,
1490 dev_attr->max_qp_init_rd_atom);
1491 return -EINVAL;
1492 }
1493
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001494 qp->qplib_qp.modify_flags |=
1495 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1496 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1497 }
1498 if (qp_attr_mask & IB_QP_CAP) {
1499 qp->qplib_qp.modify_flags |=
1500 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1501 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1502 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1503 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1504 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1505 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1506 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1507 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1508 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1509 (qp_attr->cap.max_inline_data >=
1510 dev_attr->max_inline_data)) {
1511 dev_err(rdev_to_dev(rdev),
1512 "Create QP failed - max exceeded");
1513 return -EINVAL;
1514 }
1515 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1516 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1517 dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001518 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1519 qp_attr->cap.max_send_wr;
1520 /*
1521 * Reserving one slot for Phantom WQE. Some application can
1522 * post one extra entry in this case. Allowing this to avoid
1523 * unexpected Queue full condition
1524 */
1525 qp->qplib_qp.sq.q_full_delta -= 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001526 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1527 if (qp->qplib_qp.rq.max_wqe) {
1528 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1529 qp->qplib_qp.rq.max_wqe =
1530 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001531 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1532 qp_attr->cap.max_recv_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001533 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1534 } else {
1535 /* SRQ was used prior, just ignore the RQ caps */
1536 }
1537 }
1538 if (qp_attr_mask & IB_QP_DEST_QPN) {
1539 qp->qplib_qp.modify_flags |=
1540 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1541 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1542 }
1543 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1544 if (rc) {
1545 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1546 return rc;
1547 }
1548 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1549 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1550 return rc;
1551}
1552
1553int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1554 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1555{
1556 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1557 struct bnxt_re_dev *rdev = qp->rdev;
Leon Romanovskye13547b2017-09-19 13:22:13 +03001558 struct bnxt_qplib_qp *qplib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001559 int rc;
1560
Leon Romanovskye13547b2017-09-19 13:22:13 +03001561 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1562 if (!qplib_qp)
1563 return -ENOMEM;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001564
Leon Romanovskye13547b2017-09-19 13:22:13 +03001565 qplib_qp->id = qp->qplib_qp.id;
1566 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1567
1568 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001569 if (rc) {
1570 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
Leon Romanovskye13547b2017-09-19 13:22:13 +03001571 goto out;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001572 }
Leon Romanovskye13547b2017-09-19 13:22:13 +03001573 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1574 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1575 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1576 qp_attr->pkey_index = qplib_qp->pkey_index;
1577 qp_attr->qkey = qplib_qp->qkey;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001578 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Leon Romanovskye13547b2017-09-19 13:22:13 +03001579 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1580 qplib_qp->ah.host_sgid_index,
1581 qplib_qp->ah.hop_limit,
1582 qplib_qp->ah.traffic_class);
1583 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1584 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1585 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1586 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1587 qp_attr->timeout = qplib_qp->timeout;
1588 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1589 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1590 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1591 qp_attr->rq_psn = qplib_qp->rq.psn;
1592 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1593 qp_attr->sq_psn = qplib_qp->sq.psn;
1594 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1595 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1596 IB_SIGNAL_REQ_WR;
1597 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001598
1599 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1600 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1601 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1602 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1603 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1604 qp_init_attr->cap = qp_attr->cap;
1605
Leon Romanovskye13547b2017-09-19 13:22:13 +03001606out:
1607 kfree(qplib_qp);
1608 return rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001609}
1610
 1611/* Routine for sending QP1 packets for RoCE V1 and V2
1612 */
1613static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1614 struct ib_send_wr *wr,
1615 struct bnxt_qplib_swqe *wqe,
1616 int payload_size)
1617{
1618 struct ib_device *ibdev = &qp->rdev->ibdev;
1619 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1620 ib_ah);
1621 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1622 struct bnxt_qplib_sge sge;
1623 union ib_gid sgid;
1624 u8 nw_type;
1625 u16 ether_type;
1626 struct ib_gid_attr sgid_attr;
1627 union ib_gid dgid;
1628 bool is_eth = false;
1629 bool is_vlan = false;
1630 bool is_grh = false;
1631 bool is_udp = false;
1632 u8 ip_version = 0;
1633 u16 vlan_id = 0xFFFF;
1634 void *buf;
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07001635 int i, rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001636
1637 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1638
1639 rc = ib_get_cached_gid(ibdev, 1,
1640 qplib_ah->host_sgid_index, &sgid,
1641 &sgid_attr);
1642 if (rc) {
1643 dev_err(rdev_to_dev(qp->rdev),
1644 "Failed to query gid at index %d",
1645 qplib_ah->host_sgid_index);
1646 return rc;
1647 }
1648 if (sgid_attr.ndev) {
1649 if (is_vlan_dev(sgid_attr.ndev))
1650 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1651 dev_put(sgid_attr.ndev);
1652 }
1653 /* Get network header type for this GID */
1654 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1655 switch (nw_type) {
1656 case RDMA_NETWORK_IPV4:
1657 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1658 break;
1659 case RDMA_NETWORK_IPV6:
1660 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1661 break;
1662 default:
1663 nw_type = BNXT_RE_ROCE_V1_PACKET;
1664 break;
1665 }
1666 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1667 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1668 if (is_udp) {
1669 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1670 ip_version = 4;
1671 ether_type = ETH_P_IP;
1672 } else {
1673 ip_version = 6;
1674 ether_type = ETH_P_IPV6;
1675 }
1676 is_grh = false;
1677 } else {
1678 ether_type = ETH_P_IBOE;
1679 is_grh = true;
1680 }
1681
1682 is_eth = true;
1683 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1684
1685 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1686 ip_version, is_udp, 0, &qp->qp1_hdr);
1687
1688 /* ETH */
1689 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1690 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1691
1692 /* For vlan, check the sgid for vlan existence */
1693
1694 if (!is_vlan) {
1695 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1696 } else {
1697 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1698 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1699 }
1700
1701 if (is_grh || (ip_version == 6)) {
1702 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1703 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1704 sizeof(sgid));
1705 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1706 }
1707
1708 if (ip_version == 4) {
1709 qp->qp1_hdr.ip4.tos = 0;
1710 qp->qp1_hdr.ip4.id = 0;
1711 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1712 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1713
1714 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1715 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1716 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1717 }
1718
1719 if (is_udp) {
1720 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1721 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1722 qp->qp1_hdr.udp.csum = 0;
1723 }
1724
1725 /* BTH */
1726 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1727 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1728 qp->qp1_hdr.immediate_present = 1;
1729 } else {
1730 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1731 }
1732 if (wr->send_flags & IB_SEND_SOLICITED)
1733 qp->qp1_hdr.bth.solicited_event = 1;
1734 /* pad_count */
1735 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1736
1737 /* P_key for QP1 is for all members */
1738 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1739 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1740 qp->qp1_hdr.bth.ack_req = 0;
1741 qp->send_psn++;
1742 qp->send_psn &= BTH_PSN_MASK;
1743 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1744 /* DETH */
 1745	/* Use the privileged Q_Key for QP1 */
1746 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1747 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1748
 1749	/* Pack the QP1 header into the transmit buffer */
1750 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1751 if (buf) {
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07001752 ib_ud_header_pack(&qp->qp1_hdr, buf);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001753 for (i = wqe->num_sge; i; i--) {
1754 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1755 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1756 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1757 }
1758
1759 /*
1760 * Max Header buf size for IPV6 RoCE V2 is 86,
 1761	 * which is the same as the QP1 SQ header buffer.
 1762	 * Header buf size for IPV4 RoCE V2 can be 66:
 1763	 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
 1764	 * Subtract 20 bytes from the QP1 SQ header buf size
1765 */
1766 if (is_udp && ip_version == 4)
1767 sge.size -= 20;
1768 /*
1769 * Max Header buf size for RoCE V1 is 78.
1770 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1771 * Subtract 8 bytes from QP1 SQ header buf size
1772 */
1773 if (!is_udp)
1774 sge.size -= 8;
1775
1776 /* Subtract 4 bytes for non vlan packets */
1777 if (!is_vlan)
1778 sge.size -= 4;
1779
1780 wqe->sg_list[0].addr = sge.addr;
1781 wqe->sg_list[0].lkey = sge.lkey;
1782 wqe->sg_list[0].size = sge.size;
1783 wqe->num_sge++;
1784
1785 } else {
1786 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1787 rc = -ENOMEM;
1788 }
1789 return rc;
1790}
1791
 1792/* The MAD layer only provides a recv SGE the size of
 1793 * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
 1794 * nor RoCE iCRC. The Cu+ solution must provide a buffer for the
 1795 * entire receive packet (334 bytes) with no VLAN and then copy the
 1796 * GRH and the MAD datagram out to the provided SGE.
1797 */
1798static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1799 struct ib_recv_wr *wr,
1800 struct bnxt_qplib_swqe *wqe,
1801 int payload_size)
1802{
1803 struct bnxt_qplib_sge ref, sge;
1804 u32 rq_prod_index;
1805 struct bnxt_re_sqp_entries *sqp_entry;
1806
1807 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1808
1809 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1810 return -ENOMEM;
1811
1812 /* Create 1 SGE to receive the entire
1813 * ethernet packet
1814 */
1815 /* Save the reference from ULP */
1816 ref.addr = wqe->sg_list[0].addr;
1817 ref.lkey = wqe->sg_list[0].lkey;
1818 ref.size = wqe->sg_list[0].size;
1819
1820 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1821
1822 /* SGE 1 */
1823 wqe->sg_list[0].addr = sge.addr;
1824 wqe->sg_list[0].lkey = sge.lkey;
1825 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1826 sge.size -= wqe->sg_list[0].size;
1827
1828 sqp_entry->sge.addr = ref.addr;
1829 sqp_entry->sge.lkey = ref.lkey;
1830 sqp_entry->sge.size = ref.size;
1831 /* Store the wrid for reporting completion */
1832 sqp_entry->wrid = wqe->wr_id;
1833 /* change the wqe->wrid to table index */
1834 wqe->wr_id = rq_prod_index;
1835 return 0;
1836}
1837
1838static int is_ud_qp(struct bnxt_re_qp *qp)
1839{
1840 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1841}
1842
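/* Translate an ib_send_wr of type SEND/SEND_WITH_IMM/SEND_WITH_INV into
 * a qplib software WQE. For UD QPs the AH id, remote QPN and remote
 * Q_Key from the ud_wr are filled in as well; IB send flags are mapped
 * to the corresponding BNXT_QPLIB_SWQE flags.
 */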
1843static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1844 struct ib_send_wr *wr,
1845 struct bnxt_qplib_swqe *wqe)
1846{
1847 struct bnxt_re_ah *ah = NULL;
1848
1849 if (is_ud_qp(qp)) {
1850 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1851 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1852 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1853 wqe->send.avid = ah->qplib_ah.id;
1854 }
1855 switch (wr->opcode) {
1856 case IB_WR_SEND:
1857 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1858 break;
1859 case IB_WR_SEND_WITH_IMM:
1860 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1861 wqe->send.imm_data = wr->ex.imm_data;
1862 break;
1863 case IB_WR_SEND_WITH_INV:
1864 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1865 wqe->send.inv_key = wr->ex.invalidate_rkey;
1866 break;
1867 default:
1868 return -EINVAL;
1869 }
1870 if (wr->send_flags & IB_SEND_SIGNALED)
1871 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1872 if (wr->send_flags & IB_SEND_FENCE)
1873 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1874 if (wr->send_flags & IB_SEND_SOLICITED)
1875 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1876 if (wr->send_flags & IB_SEND_INLINE)
1877 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1878
1879 return 0;
1880}
1881
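/* Translate RDMA WRITE/WRITE_WITH_IMM/READ work requests into a qplib
 * WQE, copying the remote VA, r_key and the requested send flags.
 */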
1882static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1883 struct bnxt_qplib_swqe *wqe)
1884{
1885 switch (wr->opcode) {
1886 case IB_WR_RDMA_WRITE:
1887 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1888 break;
1889 case IB_WR_RDMA_WRITE_WITH_IMM:
1890 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1891 wqe->rdma.imm_data = wr->ex.imm_data;
1892 break;
1893 case IB_WR_RDMA_READ:
1894 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1895 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1896 break;
1897 default:
1898 return -EINVAL;
1899 }
1900 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1901 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1902 if (wr->send_flags & IB_SEND_SIGNALED)
1903 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1904 if (wr->send_flags & IB_SEND_FENCE)
1905 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1906 if (wr->send_flags & IB_SEND_SOLICITED)
1907 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1908 if (wr->send_flags & IB_SEND_INLINE)
1909 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1910
1911 return 0;
1912}
1913
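/* Translate atomic compare-and-swap / fetch-and-add work requests into
 * a qplib WQE using the remote address and rkey from the atomic_wr.
 */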
1914static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1915 struct bnxt_qplib_swqe *wqe)
1916{
1917 switch (wr->opcode) {
1918 case IB_WR_ATOMIC_CMP_AND_SWP:
1919 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
Devesh Sharma55311d02017-08-31 09:27:30 +05301920 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001921 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1922 break;
1923 case IB_WR_ATOMIC_FETCH_AND_ADD:
1924 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1925 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1926 break;
1927 default:
1928 return -EINVAL;
1929 }
1930 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1931 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1932 if (wr->send_flags & IB_SEND_SIGNALED)
1933 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1934 if (wr->send_flags & IB_SEND_FENCE)
1935 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1936 if (wr->send_flags & IB_SEND_SOLICITED)
1937 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1938 return 0;
1939}
1940
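/* Build a local-invalidate WQE carrying the key to be invalidated. */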
1941static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1942 struct bnxt_qplib_swqe *wqe)
1943{
1944 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1945 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1946
1947 if (wr->send_flags & IB_SEND_SIGNALED)
1948 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1949 if (wr->send_flags & IB_SEND_FENCE)
1950 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1951 if (wr->send_flags & IB_SEND_SOLICITED)
1952 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1953
1954 return 0;
1955}
1956
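/* Build a fast-register (REG_MR) WQE: point the HW at the MR's PBL,
 * page list and level, and translate the IB access flags, key, length,
 * page size and IOVA from the ib_reg_wr.
 */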
1957static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1958 struct bnxt_qplib_swqe *wqe)
1959{
1960 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1961 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1962 int access = wr->access;
1963
1964 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1965 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1966 wqe->frmr.page_list = mr->pages;
1967 wqe->frmr.page_list_len = mr->npages;
1968 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1969 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1970
1971 if (wr->wr.send_flags & IB_SEND_FENCE)
1972 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1973 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1974 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1975
1976 if (access & IB_ACCESS_LOCAL_WRITE)
1977 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1978 if (access & IB_ACCESS_REMOTE_READ)
1979 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1980 if (access & IB_ACCESS_REMOTE_WRITE)
1981 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1982 if (access & IB_ACCESS_REMOTE_ATOMIC)
1983 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1984 if (access & IB_ACCESS_MW_BIND)
1985 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1986
1987 wqe->frmr.l_key = wr->key;
1988 wqe->frmr.length = wr->mr->length;
1989 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1990 wqe->frmr.va = wr->mr->iova;
1991 return 0;
1992}
1993
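/* Copy an inline send payload from the WR scatter list into the WQE's
 * inline data area. Fails if the accumulated length would exceed
 * BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH; returns the total inline length.
 */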
1994static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1995 struct ib_send_wr *wr,
1996 struct bnxt_qplib_swqe *wqe)
1997{
1998 /* Copy the inline data to the data field */
1999 u8 *in_data;
2000 u32 i, sge_len;
2001 void *sge_addr;
2002
2003 in_data = wqe->inline_data;
2004 for (i = 0; i < wr->num_sge; i++) {
2005 sge_addr = (void *)(unsigned long)
2006 wr->sg_list[i].addr;
2007 sge_len = wr->sg_list[i].length;
2008
2009 if ((sge_len + wqe->inline_len) >
2010 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2011 dev_err(rdev_to_dev(rdev),
2012 "Inline data size requested > supported value");
2013 return -EINVAL;
2014 }
2015 sge_len = wr->sg_list[i].length;
2016
2017 memcpy(in_data, sge_addr, sge_len);
2018 in_data += wr->sg_list[i].length;
2019 wqe->inline_len += wr->sg_list[i].length;
2020 }
2021 return wqe->inline_len;
2022}
2023
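/* Prepare the WQE payload: inline sends are copied into the WQE itself,
 * otherwise the WR scatter list is converted to qplib SGEs. Returns the
 * payload size, or a negative value on error.
 */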
2024static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2025 struct ib_send_wr *wr,
2026 struct bnxt_qplib_swqe *wqe)
2027{
2028 int payload_sz = 0;
2029
2030 if (wr->send_flags & IB_SEND_INLINE)
2031 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2032 else
2033 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2034 wqe->num_sge);
2035
2036 return payload_sz;
2037}
2038
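/* Workaround for a HW stall seen on UD/GSI/raw-Ethertype QPs: once
 * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, re-program the QP back
 * to RTS and reset the posted-WQE count.
 */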
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002039static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2040{
2041 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2042 qp->ib_qp.qp_type == IB_QPT_GSI ||
2043 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2044 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2045 int qp_attr_mask;
2046 struct ib_qp_attr qp_attr;
2047
2048 qp_attr_mask = IB_QP_STATE;
2049 qp_attr.qp_state = IB_QPS_RTS;
2050 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2051 qp->qplib_qp.wqe_cnt = 0;
2052 }
2053}
2054
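/* Post send WRs on the driver-owned shadow GSI QP; only plain SEND
 * WQEs are built here. Used when relaying QP1 traffic to the shadow QP.
 */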
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002055static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2056 struct bnxt_re_qp *qp,
2057 struct ib_send_wr *wr)
2058{
2059 struct bnxt_qplib_swqe wqe;
2060 int rc = 0, payload_sz = 0;
2061 unsigned long flags;
2062
2063 spin_lock_irqsave(&qp->sq_lock, flags);
2064 memset(&wqe, 0, sizeof(wqe));
2065 while (wr) {
2066 /* House keeping */
2067 memset(&wqe, 0, sizeof(wqe));
2068
2069 /* Common */
2070 wqe.num_sge = wr->num_sge;
2071 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2072 dev_err(rdev_to_dev(rdev),
2073 "Limit exceeded for Send SGEs");
2074 rc = -EINVAL;
2075 goto bad;
2076 }
2077
2078 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2079 if (payload_sz < 0) {
2080 rc = -EINVAL;
2081 goto bad;
2082 }
2083 wqe.wr_id = wr->wr_id;
2084
2085 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2086
2087 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2088 if (!rc)
2089 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2090bad:
2091 if (rc) {
2092 dev_err(rdev_to_dev(rdev),
2093 "Post send failed opcode = %#x rc = %d",
2094 wr->opcode, rc);
2095 break;
2096 }
2097 wr = wr->next;
2098 }
2099 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002100 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002101 spin_unlock_irqrestore(&qp->sq_lock, flags);
2102 return rc;
2103}
2104
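/* Main post-send verb: walk the WR chain, build a qplib WQE per WR
 * based on the opcode (send, RDMA, atomic, local invalidate, REG_MR),
 * give GSI sends the QP1 header treatment, ring the SQ doorbell once
 * at the end and apply the UD stall workaround.
 */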
2105int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2106 struct ib_send_wr **bad_wr)
2107{
2108 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2109 struct bnxt_qplib_swqe wqe;
2110 int rc = 0, payload_sz = 0;
2111 unsigned long flags;
2112
2113 spin_lock_irqsave(&qp->sq_lock, flags);
2114 while (wr) {
2115 /* House keeping */
2116 memset(&wqe, 0, sizeof(wqe));
2117
2118 /* Common */
2119 wqe.num_sge = wr->num_sge;
2120 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2121 dev_err(rdev_to_dev(qp->rdev),
2122 "Limit exceeded for Send SGEs");
2123 rc = -EINVAL;
2124 goto bad;
2125 }
2126
2127 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2128 if (payload_sz < 0) {
2129 rc = -EINVAL;
2130 goto bad;
2131 }
2132 wqe.wr_id = wr->wr_id;
2133
2134 switch (wr->opcode) {
2135 case IB_WR_SEND:
2136 case IB_WR_SEND_WITH_IMM:
2137 if (ib_qp->qp_type == IB_QPT_GSI) {
2138 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2139 payload_sz);
2140 if (rc)
2141 goto bad;
2142 wqe.rawqp1.lflags |=
2143 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2144 }
2145 switch (wr->send_flags) {
2146 case IB_SEND_IP_CSUM:
2147 wqe.rawqp1.lflags |=
2148 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2149 break;
2150 default:
2151 break;
2152 }
2153 /* Fall thru to build the wqe */
2154 case IB_WR_SEND_WITH_INV:
2155 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2156 break;
2157 case IB_WR_RDMA_WRITE:
2158 case IB_WR_RDMA_WRITE_WITH_IMM:
2159 case IB_WR_RDMA_READ:
2160 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2161 break;
2162 case IB_WR_ATOMIC_CMP_AND_SWP:
2163 case IB_WR_ATOMIC_FETCH_AND_ADD:
2164 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2165 break;
2166 case IB_WR_RDMA_READ_WITH_INV:
2167 dev_err(rdev_to_dev(qp->rdev),
2168 "RDMA Read with Invalidate is not supported");
2169 rc = -EINVAL;
2170 goto bad;
2171 case IB_WR_LOCAL_INV:
2172 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2173 break;
2174 case IB_WR_REG_MR:
2175 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2176 break;
2177 default:
2178 /* Unsupported WRs */
2179 dev_err(rdev_to_dev(qp->rdev),
2180 "WR (%#x) is not supported", wr->opcode);
2181 rc = -EINVAL;
2182 goto bad;
2183 }
2184 if (!rc)
2185 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2186bad:
2187 if (rc) {
2188 dev_err(rdev_to_dev(qp->rdev),
2189 "post_send failed op:%#x qps = %#x rc = %d\n",
2190 wr->opcode, qp->qplib_qp.state, rc);
2191 *bad_wr = wr;
2192 break;
2193 }
2194 wr = wr->next;
2195 }
2196 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002197 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002198 spin_unlock_irqrestore(&qp->sq_lock, flags);
2199
2200 return rc;
2201}
2202
2203static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2204 struct bnxt_re_qp *qp,
2205 struct ib_recv_wr *wr)
2206{
2207 struct bnxt_qplib_swqe wqe;
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002208 int rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002209
2210 memset(&wqe, 0, sizeof(wqe));
2211 while (wr) {
2212 /* House keeping */
2213 memset(&wqe, 0, sizeof(wqe));
2214
2215 /* Common */
2216 wqe.num_sge = wr->num_sge;
2217 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2218 dev_err(rdev_to_dev(rdev),
2219 "Limit exceeded for Receive SGEs");
2220 rc = -EINVAL;
2221 break;
2222 }
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002223 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002224 wqe.wr_id = wr->wr_id;
2225 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2226
2227 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2228 if (rc)
2229 break;
2230
2231 wr = wr->next;
2232 }
2233 if (!rc)
2234 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2235 return rc;
2236}
2237
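/* Post-receive verb: build a RECV WQE per WR (with the special shadow
 * buffer handling for GSI QPs) and ring the RQ doorbell every
 * BNXT_RE_RQ_WQE_THRESHOLD WQEs, plus once more for any remainder.
 */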
2238int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2239 struct ib_recv_wr **bad_wr)
2240{
2241 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2242 struct bnxt_qplib_swqe wqe;
2243 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002244 unsigned long flags;
2245 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002246
Devesh Sharma018cf592017-05-22 03:15:40 -07002247 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002248 while (wr) {
2249 /* House keeping */
2250 memset(&wqe, 0, sizeof(wqe));
2251
2252 /* Common */
2253 wqe.num_sge = wr->num_sge;
2254 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2255 dev_err(rdev_to_dev(qp->rdev),
2256 "Limit exceeded for Receive SGEs");
2257 rc = -EINVAL;
2258 *bad_wr = wr;
2259 break;
2260 }
2261
2262 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2263 wr->num_sge);
2264 wqe.wr_id = wr->wr_id;
2265 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2266
2267 if (ib_qp->qp_type == IB_QPT_GSI)
2268 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2269 payload_sz);
2270 if (!rc)
2271 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2272 if (rc) {
2273 *bad_wr = wr;
2274 break;
2275 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002276
2277 /* Ring DB if the RQEs posted reaches a threshold value */
2278 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2279 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2280 count = 0;
2281 }
2282
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002283 wr = wr->next;
2284 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002285
2286 if (count)
2287 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2288
2289 spin_unlock_irqrestore(&qp->rq_lock, flags);
2290
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002291 return rc;
2292}
2293
2294/* Completion Queues */
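/* Destroy the HW CQ, release the user-space mapping (if any) and give
 * back the NQ budget the CQ was consuming.
 */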
2295int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2296{
2297 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2298 struct bnxt_re_dev *rdev = cq->rdev;
2299 int rc;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002300 struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002301
2302 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2303 if (rc) {
2304 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2305 return rc;
2306 }
Doug Ledford374cb862017-04-25 14:00:59 -04002307 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002308 ib_umem_release(cq->umem);
2309
2310 if (cq) {
2311 kfree(cq->cql);
2312 kfree(cq);
2313 }
2314 atomic_dec(&rdev->cq_count);
Selvin Xavier6a5df912017-08-02 01:46:18 -07002315 nq->budget--;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002316 return 0;
2317}
2318
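/* Create a CQ: validate the requested depth, map the user CQ buffer or
 * allocate a kernel CQE scratch list, pick a notification queue in a
 * round-robin fashion, create the HW CQ and, when a ucontext is
 * present, return its id/tail/phase to user space.
 */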
2319struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2320 const struct ib_cq_init_attr *attr,
2321 struct ib_ucontext *context,
2322 struct ib_udata *udata)
2323{
2324 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2325 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2326 struct bnxt_re_cq *cq = NULL;
2327 int rc, entries;
2328 int cqe = attr->cqe;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002329 struct bnxt_qplib_nq *nq = NULL;
2330 unsigned int nq_alloc_cnt;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002331
2332 /* Validate CQ fields */
2333 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
 2334		dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2335 return ERR_PTR(-EINVAL);
2336 }
2337 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2338 if (!cq)
2339 return ERR_PTR(-ENOMEM);
2340
2341 cq->rdev = rdev;
2342 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2343
2344 entries = roundup_pow_of_two(cqe + 1);
2345 if (entries > dev_attr->max_cq_wqes + 1)
2346 entries = dev_attr->max_cq_wqes + 1;
2347
2348 if (context) {
2349 struct bnxt_re_cq_req req;
2350 struct bnxt_re_ucontext *uctx = container_of
2351 (context,
2352 struct bnxt_re_ucontext,
2353 ib_uctx);
2354 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2355 rc = -EFAULT;
2356 goto fail;
2357 }
2358
2359 cq->umem = ib_umem_get(context, req.cq_va,
2360 entries * sizeof(struct cq_base),
2361 IB_ACCESS_LOCAL_WRITE, 1);
2362 if (IS_ERR(cq->umem)) {
2363 rc = PTR_ERR(cq->umem);
2364 goto fail;
2365 }
2366 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2367 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002368 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002369 } else {
2370 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2371 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2372 GFP_KERNEL);
2373 if (!cq->cql) {
2374 rc = -ENOMEM;
2375 goto fail;
2376 }
2377
2378 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2379 cq->qplib_cq.sghead = NULL;
2380 cq->qplib_cq.nmap = 0;
2381 }
Selvin Xavier6a5df912017-08-02 01:46:18 -07002382 /*
 2383	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
 2384	 * used to derive the NQ index.
2385 */
2386 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2387 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002388 cq->qplib_cq.max_wqe = entries;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002389 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2390 cq->qplib_cq.nq = nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002391
2392 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2393 if (rc) {
2394 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2395 goto fail;
2396 }
2397
2398 cq->ib_cq.cqe = entries;
2399 cq->cq_period = cq->qplib_cq.period;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002400 nq->budget++;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002401
2402 atomic_inc(&rdev->cq_count);
2403
2404 if (context) {
2405 struct bnxt_re_cq_resp resp;
2406
2407 resp.cqid = cq->qplib_cq.id;
2408 resp.tail = cq->qplib_cq.hwq.cons;
2409 resp.phase = cq->qplib_cq.period;
2410 resp.rsvd = 0;
2411 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2412 if (rc) {
2413 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2414 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2415 goto c2fail;
2416 }
2417 }
2418
2419 return &cq->ib_cq;
2420
2421c2fail:
2422 if (context)
2423 ib_umem_release(cq->umem);
2424fail:
2425 kfree(cq->cql);
2426 kfree(cq);
2427 return ERR_PTR(rc);
2428}
2429
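/* The next three helpers translate HW CQE status codes (requester,
 * raw QP1 responder and RC responder) into IB work-completion status
 * values; anything unrecognised maps to IB_WC_GENERAL_ERR.
 */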
2430static u8 __req_to_ib_wc_status(u8 qstatus)
2431{
2432 switch (qstatus) {
2433 case CQ_REQ_STATUS_OK:
2434 return IB_WC_SUCCESS;
2435 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2436 return IB_WC_BAD_RESP_ERR;
2437 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2438 return IB_WC_LOC_LEN_ERR;
2439 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2440 return IB_WC_LOC_QP_OP_ERR;
2441 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2442 return IB_WC_LOC_PROT_ERR;
2443 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2444 return IB_WC_GENERAL_ERR;
2445 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2446 return IB_WC_REM_INV_REQ_ERR;
2447 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2448 return IB_WC_REM_ACCESS_ERR;
2449 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2450 return IB_WC_REM_OP_ERR;
2451 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2452 return IB_WC_RNR_RETRY_EXC_ERR;
2453 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2454 return IB_WC_RETRY_EXC_ERR;
2455 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2456 return IB_WC_WR_FLUSH_ERR;
2457 default:
2458 return IB_WC_GENERAL_ERR;
2459 }
2460 return 0;
2461}
2462
2463static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2464{
2465 switch (qstatus) {
2466 case CQ_RES_RAWETH_QP1_STATUS_OK:
2467 return IB_WC_SUCCESS;
2468 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2469 return IB_WC_LOC_ACCESS_ERR;
2470 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2471 return IB_WC_LOC_LEN_ERR;
2472 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2473 return IB_WC_LOC_PROT_ERR;
2474 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2475 return IB_WC_LOC_QP_OP_ERR;
2476 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2477 return IB_WC_GENERAL_ERR;
2478 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2479 return IB_WC_WR_FLUSH_ERR;
2480 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2481 return IB_WC_WR_FLUSH_ERR;
2482 default:
2483 return IB_WC_GENERAL_ERR;
2484 }
2485}
2486
2487static u8 __rc_to_ib_wc_status(u8 qstatus)
2488{
2489 switch (qstatus) {
2490 case CQ_RES_RC_STATUS_OK:
2491 return IB_WC_SUCCESS;
2492 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2493 return IB_WC_LOC_ACCESS_ERR;
2494 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2495 return IB_WC_LOC_LEN_ERR;
2496 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2497 return IB_WC_LOC_PROT_ERR;
2498 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2499 return IB_WC_LOC_QP_OP_ERR;
2500 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2501 return IB_WC_GENERAL_ERR;
2502 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2503 return IB_WC_REM_INV_REQ_ERR;
2504 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2505 return IB_WC_WR_FLUSH_ERR;
2506 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2507 return IB_WC_WR_FLUSH_ERR;
2508 default:
2509 return IB_WC_GENERAL_ERR;
2510 }
2511}
2512
2513static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2514{
2515 switch (cqe->type) {
2516 case BNXT_QPLIB_SWQE_TYPE_SEND:
2517 wc->opcode = IB_WC_SEND;
2518 break;
2519 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2520 wc->opcode = IB_WC_SEND;
2521 wc->wc_flags |= IB_WC_WITH_IMM;
2522 break;
2523 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2524 wc->opcode = IB_WC_SEND;
2525 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2526 break;
2527 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2528 wc->opcode = IB_WC_RDMA_WRITE;
2529 break;
2530 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2531 wc->opcode = IB_WC_RDMA_WRITE;
2532 wc->wc_flags |= IB_WC_WITH_IMM;
2533 break;
2534 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2535 wc->opcode = IB_WC_RDMA_READ;
2536 break;
2537 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2538 wc->opcode = IB_WC_COMP_SWAP;
2539 break;
2540 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2541 wc->opcode = IB_WC_FETCH_ADD;
2542 break;
2543 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2544 wc->opcode = IB_WC_LOCAL_INV;
2545 break;
2546 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2547 wc->opcode = IB_WC_REG_MR;
2548 break;
2549 default:
2550 wc->opcode = IB_WC_SEND;
2551 break;
2552 }
2553
2554 wc->status = __req_to_ib_wc_status(cqe->status);
2555}
2556
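/* Classify a raw QP1 receive CQE as RoCE v1, RoCE v2/IPv4 or
 * RoCE v2/IPv6 from the raweth_qp1 flag bits; returns -1 if the frame
 * is not a RoCE packet.
 */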
2557static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2558 u16 raweth_qp1_flags2)
2559{
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002560 bool is_ipv6 = false, is_ipv4 = false;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002561
2562 /* raweth_qp1_flags Bit 9-6 indicates itype */
2563 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2564 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2565 return -1;
2566
2567 if (raweth_qp1_flags2 &
2568 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2569 raweth_qp1_flags2 &
2570 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002571 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
2572 (raweth_qp1_flags2 &
2573 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2574 (is_ipv6 = true) : (is_ipv4 = true);
2575 return ((is_ipv6) ?
2576 BNXT_RE_ROCEV2_IPV6_PACKET :
2577 BNXT_RE_ROCEV2_IPV4_PACKET);
2578 } else {
2579 return BNXT_RE_ROCE_V1_PACKET;
2580 }
2581}
2582
2583static int bnxt_re_to_ib_nw_type(int nw_type)
2584{
2585 u8 nw_hdr_type = 0xFF;
2586
2587 switch (nw_type) {
2588 case BNXT_RE_ROCE_V1_PACKET:
2589 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2590 break;
2591 case BNXT_RE_ROCEV2_IPV4_PACKET:
2592 nw_hdr_type = RDMA_NETWORK_IPV4;
2593 break;
2594 case BNXT_RE_ROCEV2_IPV6_PACKET:
2595 nw_hdr_type = RDMA_NETWORK_IPV6;
2596 break;
2597 }
2598 return nw_hdr_type;
2599}
2600
2601static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2602 void *rq_hdr_buf)
2603{
2604 u8 *tmp_buf = NULL;
2605 struct ethhdr *eth_hdr;
2606 u16 eth_type;
2607 bool rc = false;
2608
2609 tmp_buf = (u8 *)rq_hdr_buf;
2610 /*
 2611	 * If the dest MAC is not the same as the I/F MAC, this could be
 2612	 * a loopback or multicast address; check whether it is a
 2613	 * loopback packet
2614 */
2615 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2616 tmp_buf += 4;
2617 /* Check the ether type */
2618 eth_hdr = (struct ethhdr *)tmp_buf;
2619 eth_type = ntohs(eth_hdr->h_proto);
2620 switch (eth_type) {
2621 case ETH_P_IBOE:
2622 rc = true;
2623 break;
2624 case ETH_P_IP:
2625 case ETH_P_IPV6: {
2626 u32 len;
2627 struct udphdr *udp_hdr;
2628
2629 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2630 sizeof(struct ipv6hdr));
2631 tmp_buf += sizeof(struct ethhdr) + len;
2632 udp_hdr = (struct udphdr *)tmp_buf;
2633 if (ntohs(udp_hdr->dest) ==
2634 ROCE_V2_UDP_DPORT)
2635 rc = true;
2636 break;
2637 }
2638 default:
2639 break;
2640 }
2641 }
2642
2643 return rc;
2644}
2645
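/* A packet arrived on the real QP1. Relay it to the driver's shadow QP:
 * post a receive buffer that points at the original ULP SGE, then send
 * the GRH-sized header plus payload from the QP1 header buffer so the
 * completion reported to the MAD layer has the expected GRH + MAD
 * layout.
 */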
2646static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2647 struct bnxt_qplib_cqe *cqe)
2648{
2649 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2650 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2651 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2652 struct ib_send_wr *swr;
2653 struct ib_ud_wr udwr;
2654 struct ib_recv_wr rwr;
2655 int pkt_type = 0;
2656 u32 tbl_idx;
2657 void *rq_hdr_buf;
2658 dma_addr_t rq_hdr_buf_map;
2659 dma_addr_t shrq_hdr_buf_map;
2660 u32 offset = 0;
2661 u32 skip_bytes = 0;
2662 struct ib_sge s_sge[2];
2663 struct ib_sge r_sge[2];
2664 int rc;
2665
2666 memset(&udwr, 0, sizeof(udwr));
2667 memset(&rwr, 0, sizeof(rwr));
2668 memset(&s_sge, 0, sizeof(s_sge));
2669 memset(&r_sge, 0, sizeof(r_sge));
2670
2671 swr = &udwr.wr;
2672 tbl_idx = cqe->wr_id;
2673
2674 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2675 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2676 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2677 tbl_idx);
2678
2679 /* Shadow QP header buffer */
2680 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2681 tbl_idx);
2682 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2683
2684 /* Store this cqe */
2685 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2686 sqp_entry->qp1_qp = qp1_qp;
2687
2688 /* Find packet type from the cqe */
2689
2690 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2691 cqe->raweth_qp1_flags2);
2692 if (pkt_type < 0) {
2693 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2694 return -EINVAL;
2695 }
2696
2697 /* Adjust the offset for the user buffer and post in the rq */
2698
2699 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2700 offset = 20;
2701
2702 /*
2703 * QP1 loopback packet has 4 bytes of internal header before
2704 * ether header. Skip these four bytes.
2705 */
2706 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2707 skip_bytes = 4;
2708
 2709	/* First send SGE. Skip the ether header */
2710 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2711 + skip_bytes;
2712 s_sge[0].lkey = 0xFFFFFFFF;
2713 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2714 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2715
2716 /* Second Send SGE */
2717 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2718 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2719 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2720 s_sge[1].addr += 8;
2721 s_sge[1].lkey = 0xFFFFFFFF;
2722 s_sge[1].length = 256;
2723
2724 /* First recv SGE */
2725
2726 r_sge[0].addr = shrq_hdr_buf_map;
2727 r_sge[0].lkey = 0xFFFFFFFF;
2728 r_sge[0].length = 40;
2729
2730 r_sge[1].addr = sqp_entry->sge.addr + offset;
2731 r_sge[1].lkey = sqp_entry->sge.lkey;
2732 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2733
2734 /* Create receive work request */
2735 rwr.num_sge = 2;
2736 rwr.sg_list = r_sge;
2737 rwr.wr_id = tbl_idx;
2738 rwr.next = NULL;
2739
2740 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2741 if (rc) {
2742 dev_err(rdev_to_dev(rdev),
2743 "Failed to post Rx buffers to shadow QP");
2744 return -ENOMEM;
2745 }
2746
2747 swr->num_sge = 2;
2748 swr->sg_list = s_sge;
2749 swr->wr_id = tbl_idx;
2750 swr->opcode = IB_WR_SEND;
2751 swr->next = NULL;
2752
2753 udwr.ah = &rdev->sqp_ah->ib_ah;
2754 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2755 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2756
2757 /* post data received in the send queue */
2758 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2759
2760 return 0;
2761}
2762
2763static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2764 struct bnxt_qplib_cqe *cqe)
2765{
2766 wc->opcode = IB_WC_RECV;
2767 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2768 wc->wc_flags |= IB_WC_GRH;
2769}
2770
2771static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2772 struct bnxt_qplib_cqe *cqe)
2773{
2774 wc->opcode = IB_WC_RECV;
2775 wc->status = __rc_to_ib_wc_status(cqe->status);
2776
2777 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2778 wc->wc_flags |= IB_WC_WITH_IMM;
2779 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2780 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2781 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2782 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2783 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2784}
2785
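/* Completion on the shadow QP: rebuild the ib_wc from the original QP1
 * CQE stashed in the sqp_tbl entry when the packet was relayed,
 * including the stored wr_id and the network header type.
 */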
2786static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2787 struct ib_wc *wc,
2788 struct bnxt_qplib_cqe *cqe)
2789{
2790 u32 tbl_idx;
2791 struct bnxt_re_dev *rdev = qp->rdev;
2792 struct bnxt_re_qp *qp1_qp = NULL;
2793 struct bnxt_qplib_cqe *orig_cqe = NULL;
2794 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2795 int nw_type;
2796
2797 tbl_idx = cqe->wr_id;
2798
2799 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2800 qp1_qp = sqp_entry->qp1_qp;
2801 orig_cqe = &sqp_entry->cqe;
2802
2803 wc->wr_id = sqp_entry->wrid;
2804 wc->byte_len = orig_cqe->length;
2805 wc->qp = &qp1_qp->ib_qp;
2806
2807 wc->ex.imm_data = orig_cqe->immdata;
2808 wc->src_qp = orig_cqe->src_qp;
2809 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2810 wc->port_num = 1;
2811 wc->vendor_err = orig_cqe->status;
2812
2813 wc->opcode = IB_WC_RECV;
2814 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2815 wc->wc_flags |= IB_WC_GRH;
2816
2817 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2818 orig_cqe->raweth_qp1_flags2);
2819 if (nw_type >= 0) {
2820 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2821 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2822 }
2823}
2824
2825static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2826 struct bnxt_qplib_cqe *cqe)
2827{
2828 wc->opcode = IB_WC_RECV;
2829 wc->status = __rc_to_ib_wc_status(cqe->status);
2830
2831 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2832 wc->wc_flags |= IB_WC_WITH_IMM;
2833 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2834 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2835 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2836 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2837 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2838}
2839
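/* Post a phantom WQE (a fence memory-window bind) on the SQ under the
 * SQ lock and account for it in phantom_wqe_cnt.
 */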
Eddie Wai9152e0b2017-06-14 03:26:23 -07002840static int send_phantom_wqe(struct bnxt_re_qp *qp)
2841{
2842 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2843 unsigned long flags;
2844 int rc = 0;
2845
2846 spin_lock_irqsave(&qp->sq_lock, flags);
2847
2848 rc = bnxt_re_bind_fence_mw(lib_qp);
2849 if (!rc) {
2850 lib_qp->sq.phantom_wqe_cnt++;
2851 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2852 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2853 lib_qp->id, lib_qp->sq.hwq.prod,
2854 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2855 lib_qp->sq.phantom_wqe_cnt);
2856 }
2857
2858 spin_unlock_irqrestore(&qp->sq_lock, flags);
2859 return rc;
2860}
2861
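/* Poll up to num_entries CQEs: drain the qplib CQ (sending a phantom
 * WQE if the SQ requested one), pick up any flushed-QP completions,
 * then transcribe each qplib CQE into an ib_wc, with special handling
 * for completions belonging to the shadow QP1.
 */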
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002862int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2863{
2864 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2865 struct bnxt_re_qp *qp;
2866 struct bnxt_qplib_cqe *cqe;
2867 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002868 struct bnxt_qplib_q *sq;
2869 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002870 u32 tbl_idx;
2871 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2872 unsigned long flags;
2873
2874 spin_lock_irqsave(&cq->cq_lock, flags);
2875 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002876 num_entries = budget;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002877 if (!cq->cql) {
2878 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2879 goto exit;
2880 }
2881 cqe = &cq->cql[0];
2882 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002883 lib_qp = NULL;
2884 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2885 if (lib_qp) {
2886 sq = &lib_qp->sq;
2887 if (sq->send_phantom) {
2888 qp = container_of(lib_qp,
2889 struct bnxt_re_qp, qplib_qp);
2890 if (send_phantom_wqe(qp) == -ENOMEM)
2891 dev_err(rdev_to_dev(cq->rdev),
2892 "Phantom failed! Scheduled to send again\n");
2893 else
2894 sq->send_phantom = false;
2895 }
2896 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002897 if (ncqe < budget)
2898 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
2899 cqe + ncqe,
2900 budget - ncqe);
Eddie Wai9152e0b2017-06-14 03:26:23 -07002901
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002902 if (!ncqe)
2903 break;
2904
2905 for (i = 0; i < ncqe; i++, cqe++) {
2906 /* Transcribe each qplib_wqe back to ib_wc */
2907 memset(wc, 0, sizeof(*wc));
2908
2909 wc->wr_id = cqe->wr_id;
2910 wc->byte_len = cqe->length;
2911 qp = container_of
2912 ((struct bnxt_qplib_qp *)
2913 (unsigned long)(cqe->qp_handle),
2914 struct bnxt_re_qp, qplib_qp);
2915 if (!qp) {
2916 dev_err(rdev_to_dev(cq->rdev),
2917 "POLL CQ : bad QP handle");
2918 continue;
2919 }
2920 wc->qp = &qp->ib_qp;
2921 wc->ex.imm_data = cqe->immdata;
2922 wc->src_qp = cqe->src_qp;
2923 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2924 wc->port_num = 1;
2925 wc->vendor_err = cqe->status;
2926
2927 switch (cqe->opcode) {
2928 case CQ_BASE_CQE_TYPE_REQ:
2929 if (qp->qplib_qp.id ==
2930 qp->rdev->qp1_sqp->qplib_qp.id) {
2931 /* Handle this completion with
2932 * the stored completion
2933 */
2934 memset(wc, 0, sizeof(*wc));
2935 continue;
2936 }
2937 bnxt_re_process_req_wc(wc, cqe);
2938 break;
2939 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2940 if (!cqe->status) {
2941 int rc = 0;
2942
2943 rc = bnxt_re_process_raw_qp_pkt_rx
2944 (qp, cqe);
2945 if (!rc) {
2946 memset(wc, 0, sizeof(*wc));
2947 continue;
2948 }
2949 cqe->status = -1;
2950 }
2951 /* Errors need not be looped back.
2952 * But change the wr_id to the one
2953 * stored in the table
2954 */
2955 tbl_idx = cqe->wr_id;
2956 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2957 wc->wr_id = sqp_entry->wrid;
2958 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2959 break;
2960 case CQ_BASE_CQE_TYPE_RES_RC:
2961 bnxt_re_process_res_rc_wc(wc, cqe);
2962 break;
2963 case CQ_BASE_CQE_TYPE_RES_UD:
2964 if (qp->qplib_qp.id ==
2965 qp->rdev->qp1_sqp->qplib_qp.id) {
2966 /* Handle this completion with
2967 * the stored completion
2968 */
2969 if (cqe->status) {
2970 continue;
2971 } else {
2972 bnxt_re_process_res_shadow_qp_wc
2973 (qp, wc, cqe);
2974 break;
2975 }
2976 }
2977 bnxt_re_process_res_ud_wc(wc, cqe);
2978 break;
2979 default:
2980 dev_err(rdev_to_dev(cq->rdev),
2981 "POLL CQ : type 0x%x not handled",
2982 cqe->opcode);
2983 continue;
2984 }
2985 wc++;
2986 budget--;
2987 }
2988 }
2989exit:
2990 spin_unlock_irqrestore(&cq->cq_lock, flags);
2991 return num_entries - budget;
2992}
2993
2994int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2995 enum ib_cq_notify_flags ib_cqn_flags)
2996{
2997 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2998 int type = 0;
2999
3000 /* Trigger on the very next completion */
3001 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3002 type = DBR_DBR_TYPE_CQ_ARMALL;
3003 /* Trigger on the next solicited completion */
3004 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3005 type = DBR_DBR_TYPE_CQ_ARMSE;
3006
Selvin Xavier499e4562017-06-29 12:28:18 -07003007 /* Poll to see if there are missed events */
3008 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3009 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
3010 return 1;
3011
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003012 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3013
3014 return 0;
3015}
3016
3017/* Memory Regions */
3018struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3019{
3020 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3021 struct bnxt_re_dev *rdev = pd->rdev;
3022 struct bnxt_re_mr *mr;
3023 u64 pbl = 0;
3024 int rc;
3025
3026 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3027 if (!mr)
3028 return ERR_PTR(-ENOMEM);
3029
3030 mr->rdev = rdev;
3031 mr->qplib_mr.pd = &pd->qplib_pd;
3032 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3033 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3034
3035 /* Allocate and register 0 as the address */
3036 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3037 if (rc)
3038 goto fail;
3039
3040 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
 3041	mr->qplib_mr.total_size = -1; /* Infinite length */
3042 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3043 if (rc)
3044 goto fail_mr;
3045
3046 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3047 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3048 IB_ACCESS_REMOTE_ATOMIC))
3049 mr->ib_mr.rkey = mr->ib_mr.lkey;
3050 atomic_inc(&rdev->mr_count);
3051
3052 return &mr->ib_mr;
3053
3054fail_mr:
3055 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3056fail:
3057 kfree(mr);
3058 return ERR_PTR(rc);
3059}
3060
3061int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3062{
3063 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3064 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003065 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003066
Selvin Xavier1c980b02017-05-22 03:15:34 -07003067 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3068 if (rc) {
3069 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3070 return rc;
3071 }
3072
Selvin Xavier19935192017-08-31 09:27:34 +05303073 if (mr->pages) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003074 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3075 &mr->qplib_frpl);
3076 kfree(mr->pages);
3077 mr->npages = 0;
3078 mr->pages = NULL;
3079 }
Doug Ledford374cb862017-04-25 14:00:59 -04003080 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003081 ib_umem_release(mr->ib_umem);
3082
3083 kfree(mr);
3084 atomic_dec(&rdev->mr_count);
3085 return rc;
3086}
3087
3088static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3089{
3090 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3091
3092 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3093 return -ENOMEM;
3094
3095 mr->pages[mr->npages++] = addr;
3096 return 0;
3097}
3098
3099int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3100 unsigned int *sg_offset)
3101{
3102 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3103
3104 mr->npages = 0;
3105 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3106}
3107
3108struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3109 u32 max_num_sg)
3110{
3111 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3112 struct bnxt_re_dev *rdev = pd->rdev;
3113 struct bnxt_re_mr *mr = NULL;
3114 int rc;
3115
3116 if (type != IB_MR_TYPE_MEM_REG) {
3117 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3118 return ERR_PTR(-EINVAL);
3119 }
3120 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3121 return ERR_PTR(-EINVAL);
3122
3123 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3124 if (!mr)
3125 return ERR_PTR(-ENOMEM);
3126
3127 mr->rdev = rdev;
3128 mr->qplib_mr.pd = &pd->qplib_pd;
3129 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3130 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3131
3132 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3133 if (rc)
3134 goto fail;
3135
3136 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3137 mr->ib_mr.rkey = mr->ib_mr.lkey;
3138
3139 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3140 if (!mr->pages) {
3141 rc = -ENOMEM;
3142 goto fail;
3143 }
3144 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3145 &mr->qplib_frpl, max_num_sg);
3146 if (rc) {
3147 dev_err(rdev_to_dev(rdev),
3148 "Failed to allocate HW FR page list");
3149 goto fail_mr;
3150 }
3151
3152 atomic_inc(&rdev->mr_count);
3153 return &mr->ib_mr;
3154
3155fail_mr:
3156 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3157fail:
3158 kfree(mr->pages);
3159 kfree(mr);
3160 return ERR_PTR(rc);
3161}
3162
Eddie Wai9152e0b2017-06-14 03:26:23 -07003163struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3164 struct ib_udata *udata)
3165{
3166 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3167 struct bnxt_re_dev *rdev = pd->rdev;
3168 struct bnxt_re_mw *mw;
3169 int rc;
3170
3171 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3172 if (!mw)
3173 return ERR_PTR(-ENOMEM);
3174 mw->rdev = rdev;
3175 mw->qplib_mw.pd = &pd->qplib_pd;
3176
3177 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3178 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3179 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3180 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3181 if (rc) {
3182 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3183 goto fail;
3184 }
3185 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3186
3187 atomic_inc(&rdev->mw_count);
3188 return &mw->ib_mw;
3189
3190fail:
3191 kfree(mw);
3192 return ERR_PTR(rc);
3193}
3194
3195int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3196{
3197 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3198 struct bnxt_re_dev *rdev = mw->rdev;
3199 int rc;
3200
3201 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3202 if (rc) {
3203 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3204 return rc;
3205 }
3206
3207 kfree(mw);
3208 atomic_dec(&rdev->mw_count);
3209 return rc;
3210}
3211
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003212/* uverbs */
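/* Register a user memory region: pin the pages with ib_umem_get, build
 * a flat page-by-page PBL of DMA addresses (huge pages and non-default
 * umem page shifts are rejected) and register it with the HW.
 */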
3213struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3214 u64 virt_addr, int mr_access_flags,
3215 struct ib_udata *udata)
3216{
3217 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3218 struct bnxt_re_dev *rdev = pd->rdev;
3219 struct bnxt_re_mr *mr;
3220 struct ib_umem *umem;
3221 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003222 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003223 struct scatterlist *sg;
3224 int entry;
3225
Selvin Xavier58d4a672017-06-29 12:28:12 -07003226 if (length > BNXT_RE_MAX_MR_SIZE) {
3227 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3228 length, BNXT_RE_MAX_MR_SIZE);
3229 return ERR_PTR(-ENOMEM);
3230 }
3231
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003232 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3233 if (!mr)
3234 return ERR_PTR(-ENOMEM);
3235
3236 mr->rdev = rdev;
3237 mr->qplib_mr.pd = &pd->qplib_pd;
3238 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3239 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3240
3241 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3242 mr_access_flags, 0);
3243 if (IS_ERR(umem)) {
3244 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3245 rc = -EFAULT;
3246 goto free_mr;
3247 }
3248 mr->ib_umem = umem;
3249
3250 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3251 if (rc) {
3252 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3253 goto release_umem;
3254 }
3255 /* The fixed portion of the rkey is the same as the lkey */
3256 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3257
3258 mr->qplib_mr.va = virt_addr;
3259 umem_pgs = ib_umem_page_count(umem);
3260 if (!umem_pgs) {
3261 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3262 rc = -EINVAL;
3263 goto free_mrw;
3264 }
3265 mr->qplib_mr.total_size = length;
3266
3267 pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3268 if (!pbl_tbl) {
3269 rc = -EINVAL;
3270 goto free_mrw;
3271 }
3272 pbl_tbl_orig = pbl_tbl;
3273
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003274 if (umem->hugetlb) {
3275 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3276 rc = -EFAULT;
3277 goto fail;
3278 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003279
3280 if (umem->page_shift != PAGE_SHIFT) {
3281 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003282 rc = -EFAULT;
3283 goto fail;
3284 }
3285 /* Map umem buf ptrs to the PBL */
3286 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003287 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003288 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003289 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003290 }
3291 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3292 umem_pgs, false);
3293 if (rc) {
3294 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3295 goto fail;
3296 }
3297
3298 kfree(pbl_tbl_orig);
3299
3300 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3301 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3302 atomic_inc(&rdev->mr_count);
3303
3304 return &mr->ib_mr;
3305fail:
3306 kfree(pbl_tbl_orig);
3307free_mrw:
3308 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3309release_umem:
3310 ib_umem_release(umem);
3311free_mr:
3312 kfree(mr);
3313 return ERR_PTR(rc);
3314}
3315
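/* Allocate a per-process user context: check the uverbs ABI version,
 * allocate the shared page and return device limits (QP count, page
 * size, CQE size, max CQ depth) to user space.
 */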
3316struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3317 struct ib_udata *udata)
3318{
3319 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3320 struct bnxt_re_uctx_resp resp;
3321 struct bnxt_re_ucontext *uctx;
3322 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3323 int rc;
3324
3325 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3326 ibdev->uverbs_abi_ver);
3327
3328 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3329 dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3330 BNXT_RE_ABI_VERSION);
3331 return ERR_PTR(-EPERM);
3332 }
3333
3334 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3335 if (!uctx)
3336 return ERR_PTR(-ENOMEM);
3337
3338 uctx->rdev = rdev;
3339
3340 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3341 if (!uctx->shpg) {
3342 rc = -ENOMEM;
3343 goto fail;
3344 }
3345 spin_lock_init(&uctx->sh_lock);
3346
3347 resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
3348 resp.max_qp = rdev->qplib_ctx.qpc_count;
3349 resp.pg_size = PAGE_SIZE;
3350 resp.cqe_sz = sizeof(struct cq_base);
3351 resp.max_cqd = dev_attr->max_cq_wqes;
3352 resp.rsvd = 0;
3353
3354 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3355 if (rc) {
3356 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3357 rc = -EFAULT;
3358 goto cfail;
3359 }
3360
3361 return &uctx->ib_uctx;
3362cfail:
3363 free_page((unsigned long)uctx->shpg);
3364 uctx->shpg = NULL;
3365fail:
3366 kfree(uctx);
3367 return ERR_PTR(rc);
3368}
3369
3370int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3371{
3372 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3373 struct bnxt_re_ucontext,
3374 ib_uctx);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003375
3376 struct bnxt_re_dev *rdev = uctx->rdev;
3377 int rc = 0;
3378
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003379 if (uctx->shpg)
3380 free_page((unsigned long)uctx->shpg);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003381
3382 if (uctx->dpi.dbr) {
 3383	/* Free the DPI (allocated when the application created its
 3384	 * first PD) and mark the context dpi as NULL
3385 */
3386 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3387 &rdev->qplib_res.dpi_tbl,
3388 &uctx->dpi);
3389 if (rc)
Colin Ian King24bb4d82017-07-14 08:30:10 +01003390 dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003391 /* Don't fail, continue*/
3392 uctx->dpi.dbr = NULL;
3393 }
3394
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003395 kfree(uctx);
3396 return 0;
3397}
3398
3399/* Helper function to mmap the virtual memory from user app */
3400int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3401{
3402 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3403 struct bnxt_re_ucontext,
3404 ib_uctx);
3405 struct bnxt_re_dev *rdev = uctx->rdev;
3406 u64 pfn;
3407
3408 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3409 return -EINVAL;
3410
3411 if (vma->vm_pgoff) {
3412 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3413 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3414 PAGE_SIZE, vma->vm_page_prot)) {
3415 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3416 return -EAGAIN;
3417 }
3418 } else {
3419 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3420 if (remap_pfn_range(vma, vma->vm_start,
3421 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3422 dev_err(rdev_to_dev(rdev),
3423 "Failed to map shared page");
3424 return -EAGAIN;
3425 }
3426 }
3427
3428 return 0;
3429}