/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

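/*
 * Translate access-flag bitmasks between the IB core (IB_ACCESS_*)
 * namespace and the low-level qplib (BNXT_QPLIB_ACCESS_*) namespace.
 * The two helpers below are inverses of each other for the flags this
 * device supports.
 */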
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
};

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
};

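/*
 * Copy an IB SGE array into the qplib SGE format and return the total
 * payload length in bytes, which callers use to size the message.
 */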
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

/* Device */
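/*
 * The netdev is looked up under rcu_read_lock() and a reference is
 * taken with dev_hold() before returning, so the caller owns a
 * reference that it must drop with dev_put().
 */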
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct net_device *netdev = NULL;

	rcu_read_lock();
	if (rdev)
		netdev = rdev->netdev;
	if (netdev)
		dev_hold(netdev);

	rcu_read_unlock();
	return netdev;
}

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));

	ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_HCA;
		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_fmr = 0;
	ib_attr->max_map_per_fmr = 0;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify)
{
	switch (device_modify_mask) {
	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
		/* Modifying the GUID requires modifying the GID table */
		/* GUID should be made READ-ONLY */
		break;
	case IB_DEVICE_MODIFY_NODE_DESC:
		/* Node Desc should be made READ-ONLY */
		break;
	default:
		break;
	}
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = 5;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = 3;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP |
				    IB_PORT_IP_BASED_GIDS;

	/* Max MSG size set to 2G for now */
	port_attr->max_msg_sz = 0x80000000;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */

	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

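/*
 * GID table entries are reference-counted via bnxt_re_gid_ctx: when the
 * stack adds the same GID again (e.g. RoCE v1 and v2 entries for one
 * address), the existing hardware slot is reused and its refcount is
 * bumped, so an entry is deleted from the hardware only when the last
 * user goes away.
 */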
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx];
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->qp1_sqp) {
			dev_dbg(rdev_to_dev(rdev),
				"Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
			if (rc) {
				dev_err(rdev_to_dev(rdev),
					"Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	if ((attr->ndev) && is_vlan_dev(attr->ndev))
		vlan_id = vlan_dev_vlan_id(attr->ndev);

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

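/*
 * Fence support: each kernel PD gets a small DMA-mapped buffer, an MR
 * registered over it, and a type-1 memory window. To fence, a bind-MW
 * work request carrying the UC_FENCE flag is posted on the QP (see
 * bnxt_re_bind_fence_mw()), which the driver uses to provide the
 * ordering an IB fence requires on this hardware.
 */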
#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in the fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	dev_dbg(rdev_to_dev(qp->rdev),
		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	u64 pbl_tbl;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	pbl_tbl = dma_addr;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
			       BNXT_RE_FENCE_PBL_SIZE, false);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		dev_err(rdev_to_dev(rdev),
			"Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	int rc;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
	}

	kfree(pd);
	return 0;
}

struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
			       struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
						      struct bnxt_re_ucontext,
						      ib_uctx);
	struct bnxt_re_pd *pd;
	int rc;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp;

		if (!ucntx->dpi.dbr) {
			/* Allocate DPI in alloc_pd to avoid failing of
			 * ibv_devinfo and family of application when DPIs
			 * are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
						 &ucntx->dpi, ucntx)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to copy user response\n");
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			dev_warn(rdev_to_dev(rdev),
				 "Failed to create Fence-MR\n");
	return &pd->ib_pd;
dbfail:
	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				    &pd->qplib_pd);
fail:
	kfree(pd);
	return ERR_PTR(rc);
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	int rc;

	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
		return rc;
	}
	kfree(ah);
	return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
				struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	int rc;
	u16 vlan_tag;
	u8 nw_type;

	struct ib_gid_attr sgid_attr;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
		return ERR_PTR(-EINVAL);
	}
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	/*
	 * If RoCE V2 is enabled, the stack will have two entries for
	 * each GID entry. Avoid this duplicate entry in HW by dividing
	 * the GID index by 2 for RoCE V2.
	 */
	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
	if (ib_pd->uobject &&
	    !rdma_is_multicast_addr((struct in6_addr *)
				    grh->dgid.raw) &&
	    !rdma_link_local_addr((struct in6_addr *)
				  grh->dgid.raw)) {
		union ib_gid sgid;

		rc = ib_get_cached_gid(&rdev->ibdev, 1,
				       grh->sgid_index, &sgid,
				       &sgid_attr);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to query gid at index %d",
				grh->sgid_index);
			goto fail;
		}
		if (sgid_attr.ndev) {
			if (is_vlan_dev(sgid_attr.ndev))
				vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
			dev_put(sgid_attr.ndev);
		}
		/* Get network header type for this GID */
		nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV4:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
			break;
		case RDMA_NETWORK_IPV6:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
			break;
		default:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
			break;
		}
	}

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
		goto fail;
	}

	/* Write AVID to shared page. */
	if (ib_pd->uobject) {
		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
		struct bnxt_re_ucontext *uctx;
		unsigned long flag;
		u32 *wrptr;

		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb();	/* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return &ah->ib_ah;

fail:
	kfree(ah);
	return ERR_PTR(rc);
}

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

/* Queue Pairs */
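/*
 * Destroying a QP removes it from the flush list before tearing down
 * the hardware QP. For the GSI QP, the shadow QP and the AH that were
 * created alongside QP1 are torn down here as well.
 */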
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	int rc;

	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
					   &rdev->sqp_ah->qplib_ah);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy HW AH for shadow QP");
			return rc;
		}

		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
					   &rdev->qp1_sqp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy Shadow QP");
			return rc;
		}
		mutex_lock(&rdev->qp_lock);
		list_del(&rdev->qp1_sqp->list);
		atomic_dec(&rdev->qp_count);
		mutex_unlock(&rdev->qp_lock);

		kfree(rdev->sqp_ah);
		kfree(rdev->qp1_sqp);
		rdev->qp1_sqp = NULL;
		rdev->sqp_ah = NULL;
	}

	if (!IS_ERR_OR_NULL(qp->rumem))
		ib_umem_release(qp->rumem);
	if (!IS_ERR_OR_NULL(qp->sumem))
		ib_umem_release(qp->sumem);

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	atomic_dec(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	kfree(qp);
	return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

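/*
 * For userspace QPs, map the SQ (and, unless an SRQ is used, the RQ)
 * buffers that the user library allocated, via ib_umem_get(). RC QPs
 * also map extra room for the PSN search area appended after the SQ.
 */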
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_re_qp_req ureq;
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct ib_umem *umem;
	int bytes = 0;
	struct ib_ucontext *context = pd->ib_pd.uobject->context;
	struct bnxt_re_ucontext *cntx = container_of(context,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(context, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sglist = umem->sg_head.sgl;
	qplib_qp->sq.nmap = umem->nmap;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(context, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sglist = umem->sg_head.sgl;
		qplib_qp->rq.nmap = umem->nmap;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	qplib_qp->sq.sglist = NULL;
	qplib_qp->sq.nmap = 0;

	return PTR_ERR(umem);
}

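/*
 * QP1 (GSI) traffic is relayed through a "shadow" UD QP with a matching
 * AH, created below. The shadow QP's send queue is sized to QP1's
 * receive queue and shares QP1's CQs, and the AH points back at the
 * local GID and MAC, so received QP1 packets can be re-posted on it.
 */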
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW AH for Shadow QP");
		goto fail;
	}

	return ah;

fail:
	kfree(ah);
	return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	rdev->sqp_id = qp->qplib_qp.id;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

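/*
 * Create a QP. Requested capabilities are validated against device
 * limits, queue depths are rounded up to a power of two with headroom
 * (q_full_delta) reserved so a full queue can be detected, and the GSI
 * case additionally sets up header buffers and the shadow QP above.
 */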
struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	int rc, entries;

	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->rdev = rdev;
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
	if (qp->qplib_qp.type == IB_QPT_MAX) {
		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
			qp->qplib_qp.type);
		rc = -EINVAL;
		goto fail;
	}
	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
				  IB_SIGNAL_ALL_WR) ? true : false);

	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

	if (qp_init_attr->send_cq) {
		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Send CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
	}

	if (qp_init_attr->recv_cq) {
		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
	}

	if (qp_init_attr->srq) {
		dev_err(rdev_to_dev(rdev), "SRQ not supported");
		rc = -ENOTSUPP;
		goto fail;
	} else {
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty.
		 */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);

		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						qp_init_attr->cap.max_recv_wr;

		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
	}

	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

	if (qp_init_attr->qp_type == IB_QPT_GSI) {
		/* Allocate 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_init_attr->cap.max_send_wr;
		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		qp->qplib_qp.sq.max_sge++;
		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

		qp->qplib_qp.rq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

		qp->qplib_qp.sq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
		qp->qplib_qp.dpi = &rdev->dpi_privileged;
		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
			goto fail;
		}
		/* Create a shadow QP to handle the QP1 traffic */
		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
							 &qp->qplib_qp);
		if (!rdev->qp1_sqp) {
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create Shadow QP for QP1");
			goto qp_destroy;
		}
		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
							   &qp->qplib_qp);
		if (!rdev->sqp_ah) {
			bnxt_qplib_destroy_qp(&rdev->qplib_res,
					      &rdev->qp1_sqp->qplib_qp);
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create AH entry for ShadowQP");
			goto qp_destroy;
		}

	} else {
		/* Allocate 128 + 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes +
						BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

		/*
		 * Reserve one slot for the phantom WQE. The application can
		 * post one extra entry in this case, but allowing it avoids
		 * an unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;

		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
		if (udata) {
			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
			if (rc)
				goto fail;
		} else {
			qp->qplib_qp.dpi = &rdev->dpi_privileged;
		}

		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
			goto fail;
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	if (udata) {
		struct bnxt_re_qp_resp resp;

		resp.qpid = qp->ib_qp.qp_num;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
			goto qp_destroy;
		}
	}
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
fail:
	kfree(qp);
	return ERR_PTR(rc);
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}

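/*
 * Selected attributes modified on QP1 (state, pkey index, qkey, SQ PSN)
 * are propagated to the shadow QP here so the two stay in sync.
 */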
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	int rc = 0;

	if (qp_attr_mask & IB_QP_STATE) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Using a Random QKEY */
		qp->qplib_qp.qkey = 0x81818181;
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
	}

	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to modify Shadow QP for QP1");
	return rc;
}

int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	enum ib_qp_state curr_qp_state, new_qp_state;
	int rc, entries;
	int status;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;
	u8 nw_type;

	qp->qplib_qp.modify_flags = 0;
	if (qp_attr_mask & IB_QP_STATE) {
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
					ib_qp->qp_type, qp_attr_mask,
					IB_LINK_LAYER_ETHERNET)) {
			dev_err(rdev_to_dev(rdev),
				"Invalid attribute mask: %#x specified ",
				qp_attr_mask);
			dev_err(rdev_to_dev(rdev),
				"for qpn: %#x type: %#x",
				ib_qp->qp_num, ib_qp->qp_type);
			dev_err(rdev_to_dev(rdev),
				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
				curr_qp_state, new_qp_state);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);

		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p to flush list\n",
				qp);
			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		}
		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p out of flush list\n",
				qp);
			bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
		qp->qplib_qp.en_sqd_async_notify = true;
	}
	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
		qp->qplib_qp.access =
			__from_ib_access_flags(qp_attr->qp_access_flags);
		/* LOCAL_WRITE access must be set to allow RC receive */
		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
	}
	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		qp->qplib_qp.qkey = qp_attr->qkey;
	}
	if (qp_attr_mask & IB_QP_AV) {
		const struct ib_global_route *grh =
			rdma_ah_read_grh(&qp_attr->ah_attr);

		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
		       sizeof(qp->qplib_qp.ah.dgid.data));
		qp->qplib_qp.ah.flow_label = grh->flow_label;
		/*
		 * If RoCE V2 is enabled, the stack will have two entries for
		 * each GID entry. Avoid this duplicate entry in HW by dividing
		 * the GID index by 2 for RoCE V2.
		 */
		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
		ether_addr_copy(qp->qplib_qp.ah.dmac,
				qp_attr->ah_attr.roce.dmac);

		status = ib_get_cached_gid(&rdev->ibdev, 1,
					   grh->sgid_index,
					   &sgid, &sgid_attr);
		if (!status && sgid_attr.ndev) {
			memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
			       ETH_ALEN);
			dev_put(sgid_attr.ndev);
			nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
							 &sgid);
			switch (nw_type) {
			case RDMA_NETWORK_IPV4:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
				break;
			case RDMA_NETWORK_IPV6:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
				break;
			default:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
				break;
			}
		}
	}

	if (qp_attr_mask & IB_QP_PATH_MTU) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
	} else if (qp_attr->qp_state == IB_QPS_RTR) {
		qp->qplib_qp.modify_flags |=
			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu =
			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
		qp->qplib_qp.mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	}

	if (qp_attr_mask & IB_QP_TIMEOUT) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
		qp->qplib_qp.timeout = qp_attr->timeout;
	}
	if (qp_attr_mask & IB_QP_RETRY_CNT) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
	}
	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
	}
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
	}
	if (qp_attr_mask & IB_QP_RQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
		/* Cap the max_rd_atomic to device max */
		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
						   dev_attr->max_qp_rd_atom);
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (qp_attr->max_dest_rd_atomic >
		    dev_attr->max_qp_init_rd_atom) {
			dev_err(rdev_to_dev(rdev),
				"max_dest_rd_atomic requested %d is > dev_max %d",
				qp_attr->max_dest_rd_atomic,
				dev_attr->max_qp_init_rd_atom);
			return -EINVAL;
		}

		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
	}
	if (qp_attr_mask & IB_QP_CAP) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
						dev_attr->max_inline_data)) {
			dev_err(rdev_to_dev(rdev),
				"Create QP failed - max exceeded");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/*
		 * Reserve one slot for the phantom WQE. Some applications can
		 * post one extra entry in this case; allowing it avoids an
		 * unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			qp->qplib_qp.rq.max_wqe =
				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	return rc;
}

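/*
 * Query a QP. The qplib_qp scratch structure is heap-allocated rather
 * than placed on the stack because it is large enough to risk
 * exceeding the kernel stack frame limit.
 */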
int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
	qp_attr->timeout = qplib_qp->timeout;
	qp_attr->retry_cnt = qplib_qp->retry_cnt;
	qp_attr->rnr_retry = qplib_qp->rnr_retry;
	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
	qp_attr->rq_psn = qplib_qp->rq.psn;
	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
	qp_attr->sq_psn = qplib_qp->sq.psn;
	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
							 IB_SIGNAL_REQ_WR;
	qp_attr->dest_qp_num = qplib_qp->dest_qpn;

	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

out:
	kfree(qplib_qp);
	return rc;
}

 1614/* Routine for sending QP1 packets for RoCE V1 and V2 */
1616static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1617 struct ib_send_wr *wr,
1618 struct bnxt_qplib_swqe *wqe,
1619 int payload_size)
1620{
1621 struct ib_device *ibdev = &qp->rdev->ibdev;
1622 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1623 ib_ah);
1624 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1625 struct bnxt_qplib_sge sge;
1626 union ib_gid sgid;
1627 u8 nw_type;
1628 u16 ether_type;
1629 struct ib_gid_attr sgid_attr;
1630 union ib_gid dgid;
1631 bool is_eth = false;
1632 bool is_vlan = false;
1633 bool is_grh = false;
1634 bool is_udp = false;
1635 u8 ip_version = 0;
1636 u16 vlan_id = 0xFFFF;
1637 void *buf;
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07001638 int i, rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001639
1640 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1641
1642 rc = ib_get_cached_gid(ibdev, 1,
1643 qplib_ah->host_sgid_index, &sgid,
1644 &sgid_attr);
1645 if (rc) {
1646 dev_err(rdev_to_dev(qp->rdev),
1647 "Failed to query gid at index %d",
1648 qplib_ah->host_sgid_index);
1649 return rc;
1650 }
1651 if (sgid_attr.ndev) {
1652 if (is_vlan_dev(sgid_attr.ndev))
1653 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1654 dev_put(sgid_attr.ndev);
1655 }
1656 /* Get network header type for this GID */
1657 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1658 switch (nw_type) {
1659 case RDMA_NETWORK_IPV4:
1660 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1661 break;
1662 case RDMA_NETWORK_IPV6:
1663 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1664 break;
1665 default:
1666 nw_type = BNXT_RE_ROCE_V1_PACKET;
1667 break;
1668 }
1669 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1670 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1671 if (is_udp) {
1672 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1673 ip_version = 4;
1674 ether_type = ETH_P_IP;
1675 } else {
1676 ip_version = 6;
1677 ether_type = ETH_P_IPV6;
1678 }
1679 is_grh = false;
1680 } else {
1681 ether_type = ETH_P_IBOE;
1682 is_grh = true;
1683 }
1684
1685 is_eth = true;
 1686	is_vlan = vlan_id && (vlan_id < 0x1000);
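	/* VLAN IDs are 12 bits (0x0..0xFFF); the 0xFFFF initial value
	 * acts as the "no VLAN" sentinel.
	 */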
1687
1688 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1689 ip_version, is_udp, 0, &qp->qp1_hdr);
1690
1691 /* ETH */
1692 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1693 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1694
 1695	/* For VLAN, check the SGID for VLAN existence */
1696
1697 if (!is_vlan) {
1698 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1699 } else {
1700 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1701 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1702 }
1703
1704 if (is_grh || (ip_version == 6)) {
1705 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1706 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1707 sizeof(sgid));
1708 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1709 }
1710
1711 if (ip_version == 4) {
1712 qp->qp1_hdr.ip4.tos = 0;
1713 qp->qp1_hdr.ip4.id = 0;
1714 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1715 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1716
1717 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1718 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1719 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1720 }
1721
1722 if (is_udp) {
1723 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1724 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1725 qp->qp1_hdr.udp.csum = 0;
1726 }
1727
1728 /* BTH */
1729 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1730 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1731 qp->qp1_hdr.immediate_present = 1;
1732 } else {
1733 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1734 }
1735 if (wr->send_flags & IB_SEND_SOLICITED)
1736 qp->qp1_hdr.bth.solicited_event = 1;
 1737	/* pad_count: pad the payload out to a 4-byte boundary */
1738 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
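	/* Example: a 13-byte payload yields (4 - 13) & 3 = 3 pad bytes,
	 * rounding the payload up to a 4-byte boundary (16 bytes).
	 */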
1739
1740 /* P_key for QP1 is for all members */
1741 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1742 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1743 qp->qp1_hdr.bth.ack_req = 0;
1744 qp->send_psn++;
1745 qp->send_psn &= BTH_PSN_MASK;
1746 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1747 /* DETH */
 1748	/* Use the privileged Q_Key for QP1 */
1749 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1750 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1751
1752 /* Pack the QP1 to the transmit buffer */
1753 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1754 if (buf) {
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07001755 ib_ud_header_pack(&qp->qp1_hdr, buf);
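		/* Shift the ULP-supplied SGEs up by one slot so that
		 * sg_list[0] can carry the freshly packed QP1 header.
		 */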
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001756 for (i = wqe->num_sge; i; i--) {
1757 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1758 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1759 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1760 }
1761
 1762		/*
 1763		 * The max header buf size for IPv6 RoCE V2 is 86:
 1764		 * ETH(14) + VLAN(4) + IPv6(40) + UDP(8) + BTH(20),
 1765		 * which is the same as the QP1 SQ header buffer size.
 1766		 * The header buf size for IPv4 RoCE V2 is 66:
 1767		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
 1768		 * Subtract 20 bytes from the QP1 SQ header buf size.
 1769		 */
1769 if (is_udp && ip_version == 4)
1770 sge.size -= 20;
 1771		/*
 1772		 * The max header buf size for RoCE V1 is 78:
 1773		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
 1774		 * Subtract 8 bytes from the QP1 SQ header buf size.
 1775		 */
1776 if (!is_udp)
1777 sge.size -= 8;
1778
 1779		/* Subtract 4 bytes for non-VLAN packets */
1780 if (!is_vlan)
1781 sge.size -= 4;
1782
1783 wqe->sg_list[0].addr = sge.addr;
1784 wqe->sg_list[0].lkey = sge.lkey;
1785 wqe->sg_list[0].size = sge.size;
1786 wqe->num_sge++;
1787
1788 } else {
1789 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1790 rc = -ENOMEM;
1791 }
1792 return rc;
1793}
1794
 1795/* The MAD layer provides only a recv SGE the size of
 1796 * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
 1797 * or RoCE iCRC. The Cu+ solution must provide a buffer for the entire
 1798 * receive packet (334 bytes) with no VLAN and then copy the GRH
 1799 * and the MAD datagram out to the provided SGE.
 1800 */
1801static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1802 struct ib_recv_wr *wr,
1803 struct bnxt_qplib_swqe *wqe,
1804 int payload_size)
1805{
1806 struct bnxt_qplib_sge ref, sge;
1807 u32 rq_prod_index;
1808 struct bnxt_re_sqp_entries *sqp_entry;
1809
1810 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1811
1812 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1813 return -ENOMEM;
1814
1815 /* Create 1 SGE to receive the entire
1816 * ethernet packet
1817 */
1818 /* Save the reference from ULP */
1819 ref.addr = wqe->sg_list[0].addr;
1820 ref.lkey = wqe->sg_list[0].lkey;
1821 ref.size = wqe->sg_list[0].size;
1822
1823 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1824
1825 /* SGE 1 */
1826 wqe->sg_list[0].addr = sge.addr;
1827 wqe->sg_list[0].lkey = sge.lkey;
1828 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1829 sge.size -= wqe->sg_list[0].size;
1830
1831 sqp_entry->sge.addr = ref.addr;
1832 sqp_entry->sge.lkey = ref.lkey;
1833 sqp_entry->sge.size = ref.size;
1834 /* Store the wrid for reporting completion */
1835 sqp_entry->wrid = wqe->wr_id;
1836 /* change the wqe->wrid to table index */
1837 wqe->wr_id = rq_prod_index;
1838 return 0;
1839}
1840
1841static int is_ud_qp(struct bnxt_re_qp *qp)
1842{
1843 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1844}
1845
1846static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1847 struct ib_send_wr *wr,
1848 struct bnxt_qplib_swqe *wqe)
1849{
1850 struct bnxt_re_ah *ah = NULL;
1851
1852 if (is_ud_qp(qp)) {
1853 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1854 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1855 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1856 wqe->send.avid = ah->qplib_ah.id;
1857 }
1858 switch (wr->opcode) {
1859 case IB_WR_SEND:
1860 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1861 break;
1862 case IB_WR_SEND_WITH_IMM:
1863 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1864 wqe->send.imm_data = wr->ex.imm_data;
1865 break;
1866 case IB_WR_SEND_WITH_INV:
1867 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1868 wqe->send.inv_key = wr->ex.invalidate_rkey;
1869 break;
1870 default:
1871 return -EINVAL;
1872 }
1873 if (wr->send_flags & IB_SEND_SIGNALED)
1874 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1875 if (wr->send_flags & IB_SEND_FENCE)
1876 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1877 if (wr->send_flags & IB_SEND_SOLICITED)
1878 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1879 if (wr->send_flags & IB_SEND_INLINE)
1880 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1881
1882 return 0;
1883}
1884
1885static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1886 struct bnxt_qplib_swqe *wqe)
1887{
1888 switch (wr->opcode) {
1889 case IB_WR_RDMA_WRITE:
1890 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1891 break;
1892 case IB_WR_RDMA_WRITE_WITH_IMM:
1893 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1894 wqe->rdma.imm_data = wr->ex.imm_data;
1895 break;
1896 case IB_WR_RDMA_READ:
1897 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1898 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1899 break;
1900 default:
1901 return -EINVAL;
1902 }
1903 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1904 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1905 if (wr->send_flags & IB_SEND_SIGNALED)
1906 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1907 if (wr->send_flags & IB_SEND_FENCE)
1908 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1909 if (wr->send_flags & IB_SEND_SOLICITED)
1910 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1911 if (wr->send_flags & IB_SEND_INLINE)
1912 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1913
1914 return 0;
1915}
1916
1917static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1918 struct bnxt_qplib_swqe *wqe)
1919{
1920 switch (wr->opcode) {
1921 case IB_WR_ATOMIC_CMP_AND_SWP:
1922 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
Devesh Sharma55311d02017-08-31 09:27:30 +05301923 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001924 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1925 break;
1926 case IB_WR_ATOMIC_FETCH_AND_ADD:
1927 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1928 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1929 break;
1930 default:
1931 return -EINVAL;
1932 }
1933 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1934 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1935 if (wr->send_flags & IB_SEND_SIGNALED)
1936 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1937 if (wr->send_flags & IB_SEND_FENCE)
1938 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1939 if (wr->send_flags & IB_SEND_SOLICITED)
1940 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1941 return 0;
1942}
1943
1944static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1945 struct bnxt_qplib_swqe *wqe)
1946{
1947 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1948 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1949
1950 if (wr->send_flags & IB_SEND_SIGNALED)
1951 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1952 if (wr->send_flags & IB_SEND_FENCE)
1953 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1954 if (wr->send_flags & IB_SEND_SOLICITED)
1955 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1956
1957 return 0;
1958}
1959
1960static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1961 struct bnxt_qplib_swqe *wqe)
1962{
1963 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1964 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1965 int access = wr->access;
1966
1967 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1968 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1969 wqe->frmr.page_list = mr->pages;
1970 wqe->frmr.page_list_len = mr->npages;
1971 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1972 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1973
1974 if (wr->wr.send_flags & IB_SEND_FENCE)
1975 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1976 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1977 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1978
1979 if (access & IB_ACCESS_LOCAL_WRITE)
1980 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1981 if (access & IB_ACCESS_REMOTE_READ)
1982 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1983 if (access & IB_ACCESS_REMOTE_WRITE)
1984 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1985 if (access & IB_ACCESS_REMOTE_ATOMIC)
1986 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1987 if (access & IB_ACCESS_MW_BIND)
1988 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1989
1990 wqe->frmr.l_key = wr->key;
1991 wqe->frmr.length = wr->mr->length;
1992 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1993 wqe->frmr.va = wr->mr->iova;
1994 return 0;
1995}
1996
1997static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1998 struct ib_send_wr *wr,
1999 struct bnxt_qplib_swqe *wqe)
2000{
2001 /* Copy the inline data to the data field */
2002 u8 *in_data;
2003 u32 i, sge_len;
2004 void *sge_addr;
2005
2006 in_data = wqe->inline_data;
2007 for (i = 0; i < wr->num_sge; i++) {
2008 sge_addr = (void *)(unsigned long)
2009 wr->sg_list[i].addr;
2010 sge_len = wr->sg_list[i].length;
2011
2012 if ((sge_len + wqe->inline_len) >
2013 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2014 dev_err(rdev_to_dev(rdev),
2015 "Inline data size requested > supported value");
2016 return -EINVAL;
2017 }
2019
2020 memcpy(in_data, sge_addr, sge_len);
2021 in_data += wr->sg_list[i].length;
2022 wqe->inline_len += wr->sg_list[i].length;
2023 }
2024 return wqe->inline_len;
2025}
2026
2027static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2028 struct ib_send_wr *wr,
2029 struct bnxt_qplib_swqe *wqe)
2030{
2031 int payload_sz = 0;
2032
2033 if (wr->send_flags & IB_SEND_INLINE)
2034 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2035 else
2036 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2037 wqe->num_sge);
2038
2039 return payload_sz;
2040}
2041
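/*
 * Workaround for a UD QP hardware stall: once wqe_cnt reaches
 * BNXT_RE_UD_QP_HW_STALL posted WQEs, nudge the QP back to RTS
 * through modify_qp and reset the count.
 */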
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002042static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2043{
2044 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2045 qp->ib_qp.qp_type == IB_QPT_GSI ||
2046 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2047 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2048 int qp_attr_mask;
2049 struct ib_qp_attr qp_attr;
2050
2051 qp_attr_mask = IB_QP_STATE;
2052 qp_attr.qp_state = IB_QPS_RTS;
2053 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2054 qp->qplib_qp.wqe_cnt = 0;
2055 }
2056}
2057
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002058static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2059 struct bnxt_re_qp *qp,
2060 struct ib_send_wr *wr)
2061{
2062 struct bnxt_qplib_swqe wqe;
2063 int rc = 0, payload_sz = 0;
2064 unsigned long flags;
2065
2066 spin_lock_irqsave(&qp->sq_lock, flags);
2067 memset(&wqe, 0, sizeof(wqe));
2068 while (wr) {
2069 /* House keeping */
2070 memset(&wqe, 0, sizeof(wqe));
2071
2072 /* Common */
2073 wqe.num_sge = wr->num_sge;
2074 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2075 dev_err(rdev_to_dev(rdev),
2076 "Limit exceeded for Send SGEs");
2077 rc = -EINVAL;
2078 goto bad;
2079 }
2080
2081 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2082 if (payload_sz < 0) {
2083 rc = -EINVAL;
2084 goto bad;
2085 }
2086 wqe.wr_id = wr->wr_id;
2087
2088 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2089
2090 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2091 if (!rc)
2092 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2093bad:
2094 if (rc) {
2095 dev_err(rdev_to_dev(rdev),
2096 "Post send failed opcode = %#x rc = %d",
2097 wr->opcode, rc);
2098 break;
2099 }
2100 wr = wr->next;
2101 }
2102 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002103 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002104 spin_unlock_irqrestore(&qp->sq_lock, flags);
2105 return rc;
2106}
2107
2108int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2109 struct ib_send_wr **bad_wr)
2110{
2111 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2112 struct bnxt_qplib_swqe wqe;
2113 int rc = 0, payload_sz = 0;
2114 unsigned long flags;
2115
2116 spin_lock_irqsave(&qp->sq_lock, flags);
2117 while (wr) {
2118 /* House keeping */
2119 memset(&wqe, 0, sizeof(wqe));
2120
2121 /* Common */
2122 wqe.num_sge = wr->num_sge;
2123 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2124 dev_err(rdev_to_dev(qp->rdev),
2125 "Limit exceeded for Send SGEs");
2126 rc = -EINVAL;
2127 goto bad;
2128 }
2129
2130 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2131 if (payload_sz < 0) {
2132 rc = -EINVAL;
2133 goto bad;
2134 }
2135 wqe.wr_id = wr->wr_id;
2136
2137 switch (wr->opcode) {
2138 case IB_WR_SEND:
2139 case IB_WR_SEND_WITH_IMM:
2140 if (ib_qp->qp_type == IB_QPT_GSI) {
2141 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2142 payload_sz);
2143 if (rc)
2144 goto bad;
2145 wqe.rawqp1.lflags |=
2146 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2147 }
 2148			/* send_flags is a bitmask, so test the bit */
 2149			if (wr->send_flags & IB_SEND_IP_CSUM)
 2150				wqe.rawqp1.lflags |=
 2151					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
 2156			/* Fall through to build the WQE */
2157 case IB_WR_SEND_WITH_INV:
2158 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2159 break;
2160 case IB_WR_RDMA_WRITE:
2161 case IB_WR_RDMA_WRITE_WITH_IMM:
2162 case IB_WR_RDMA_READ:
2163 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2164 break;
2165 case IB_WR_ATOMIC_CMP_AND_SWP:
2166 case IB_WR_ATOMIC_FETCH_AND_ADD:
2167 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2168 break;
2169 case IB_WR_RDMA_READ_WITH_INV:
2170 dev_err(rdev_to_dev(qp->rdev),
2171 "RDMA Read with Invalidate is not supported");
2172 rc = -EINVAL;
2173 goto bad;
2174 case IB_WR_LOCAL_INV:
2175 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2176 break;
2177 case IB_WR_REG_MR:
2178 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2179 break;
2180 default:
2181 /* Unsupported WRs */
2182 dev_err(rdev_to_dev(qp->rdev),
2183 "WR (%#x) is not supported", wr->opcode);
2184 rc = -EINVAL;
2185 goto bad;
2186 }
2187 if (!rc)
2188 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2189bad:
2190 if (rc) {
2191 dev_err(rdev_to_dev(qp->rdev),
2192 "post_send failed op:%#x qps = %#x rc = %d\n",
2193 wr->opcode, qp->qplib_qp.state, rc);
2194 *bad_wr = wr;
2195 break;
2196 }
2197 wr = wr->next;
2198 }
2199 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002200 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002201 spin_unlock_irqrestore(&qp->sq_lock, flags);
2202
2203 return rc;
2204}
2205
2206static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2207 struct bnxt_re_qp *qp,
2208 struct ib_recv_wr *wr)
2209{
2210 struct bnxt_qplib_swqe wqe;
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002211 int rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002212
2213 memset(&wqe, 0, sizeof(wqe));
2214 while (wr) {
2215 /* House keeping */
2216 memset(&wqe, 0, sizeof(wqe));
2217
2218 /* Common */
2219 wqe.num_sge = wr->num_sge;
2220 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2221 dev_err(rdev_to_dev(rdev),
2222 "Limit exceeded for Receive SGEs");
2223 rc = -EINVAL;
2224 break;
2225 }
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002226 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002227 wqe.wr_id = wr->wr_id;
2228 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2229
2230 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2231 if (rc)
2232 break;
2233
2234 wr = wr->next;
2235 }
2236 if (!rc)
2237 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2238 return rc;
2239}
2240
2241int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2242 struct ib_recv_wr **bad_wr)
2243{
2244 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2245 struct bnxt_qplib_swqe wqe;
2246 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002247 unsigned long flags;
2248 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002249
Devesh Sharma018cf592017-05-22 03:15:40 -07002250 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002251 while (wr) {
2252 /* House keeping */
2253 memset(&wqe, 0, sizeof(wqe));
2254
2255 /* Common */
2256 wqe.num_sge = wr->num_sge;
2257 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2258 dev_err(rdev_to_dev(qp->rdev),
2259 "Limit exceeded for Receive SGEs");
2260 rc = -EINVAL;
2261 *bad_wr = wr;
2262 break;
2263 }
2264
2265 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2266 wr->num_sge);
2267 wqe.wr_id = wr->wr_id;
2268 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2269
2270 if (ib_qp->qp_type == IB_QPT_GSI)
2271 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2272 payload_sz);
2273 if (!rc)
2274 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2275 if (rc) {
2276 *bad_wr = wr;
2277 break;
2278 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002279
 2280		/* Ring the DB once the posted RQEs reach a threshold */
2281 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2282 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2283 count = 0;
2284 }
2285
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002286 wr = wr->next;
2287 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002288
2289 if (count)
2290 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2291
2292 spin_unlock_irqrestore(&qp->rq_lock, flags);
2293
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002294 return rc;
2295}
2296
2297/* Completion Queues */
2298int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2299{
2300 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2301 struct bnxt_re_dev *rdev = cq->rdev;
2302 int rc;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002303 struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002304
2305 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2306 if (rc) {
2307 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2308 return rc;
2309 }
Doug Ledford374cb862017-04-25 14:00:59 -04002310 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002311 ib_umem_release(cq->umem);
2312
 2313	kfree(cq->cql);
 2314	kfree(cq);
2317 atomic_dec(&rdev->cq_count);
Selvin Xavier6a5df912017-08-02 01:46:18 -07002318 nq->budget--;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002319 return 0;
2320}
2321
2322struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2323 const struct ib_cq_init_attr *attr,
2324 struct ib_ucontext *context,
2325 struct ib_udata *udata)
2326{
2327 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2328 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2329 struct bnxt_re_cq *cq = NULL;
2330 int rc, entries;
2331 int cqe = attr->cqe;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002332 struct bnxt_qplib_nq *nq = NULL;
2333 unsigned int nq_alloc_cnt;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002334
2335 /* Validate CQ fields */
2336 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2337 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2338 return ERR_PTR(-EINVAL);
2339 }
2340 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2341 if (!cq)
2342 return ERR_PTR(-ENOMEM);
2343
2344 cq->rdev = rdev;
2345 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2346
2347 entries = roundup_pow_of_two(cqe + 1);
2348 if (entries > dev_attr->max_cq_wqes + 1)
2349 entries = dev_attr->max_cq_wqes + 1;
2350
2351 if (context) {
2352 struct bnxt_re_cq_req req;
2353 struct bnxt_re_ucontext *uctx = container_of
2354 (context,
2355 struct bnxt_re_ucontext,
2356 ib_uctx);
2357 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2358 rc = -EFAULT;
2359 goto fail;
2360 }
2361
2362 cq->umem = ib_umem_get(context, req.cq_va,
2363 entries * sizeof(struct cq_base),
2364 IB_ACCESS_LOCAL_WRITE, 1);
2365 if (IS_ERR(cq->umem)) {
2366 rc = PTR_ERR(cq->umem);
2367 goto fail;
2368 }
2369 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2370 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002371 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002372 } else {
2373 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2374 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2375 GFP_KERNEL);
2376 if (!cq->cql) {
2377 rc = -ENOMEM;
2378 goto fail;
2379 }
2380
2381 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2382 cq->qplib_cq.sghead = NULL;
2383 cq->qplib_cq.nmap = 0;
2384 }
 2385	/*
 2386	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
 2387	 * used for getting the NQ index.
 2388	 */
2389 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2390 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
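	/* e.g. with num_msix = 9, successive CQs land on nq[0]..nq[7]
	 * and then wrap around.
	 */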
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002391 cq->qplib_cq.max_wqe = entries;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002392 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2393 cq->qplib_cq.nq = nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002394
2395 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2396 if (rc) {
2397 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2398 goto fail;
2399 }
2400
2401 cq->ib_cq.cqe = entries;
2402 cq->cq_period = cq->qplib_cq.period;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002403 nq->budget++;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002404
2405 atomic_inc(&rdev->cq_count);
2406
2407 if (context) {
2408 struct bnxt_re_cq_resp resp;
2409
2410 resp.cqid = cq->qplib_cq.id;
2411 resp.tail = cq->qplib_cq.hwq.cons;
2412 resp.phase = cq->qplib_cq.period;
2413 resp.rsvd = 0;
2414 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2415 if (rc) {
2416 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2417 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2418 goto c2fail;
2419 }
2420 }
2421
2422 return &cq->ib_cq;
2423
2424c2fail:
2425 if (context)
2426 ib_umem_release(cq->umem);
2427fail:
2428 kfree(cq->cql);
2429 kfree(cq);
2430 return ERR_PTR(rc);
2431}
2432
2433static u8 __req_to_ib_wc_status(u8 qstatus)
2434{
2435 switch (qstatus) {
2436 case CQ_REQ_STATUS_OK:
2437 return IB_WC_SUCCESS;
2438 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2439 return IB_WC_BAD_RESP_ERR;
2440 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2441 return IB_WC_LOC_LEN_ERR;
2442 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2443 return IB_WC_LOC_QP_OP_ERR;
2444 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2445 return IB_WC_LOC_PROT_ERR;
2446 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2447 return IB_WC_GENERAL_ERR;
2448 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2449 return IB_WC_REM_INV_REQ_ERR;
2450 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2451 return IB_WC_REM_ACCESS_ERR;
2452 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2453 return IB_WC_REM_OP_ERR;
2454 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2455 return IB_WC_RNR_RETRY_EXC_ERR;
2456 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2457 return IB_WC_RETRY_EXC_ERR;
2458 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2459 return IB_WC_WR_FLUSH_ERR;
2460 default:
2461 return IB_WC_GENERAL_ERR;
2462 }
2464}
2465
2466static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2467{
2468 switch (qstatus) {
2469 case CQ_RES_RAWETH_QP1_STATUS_OK:
2470 return IB_WC_SUCCESS;
2471 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2472 return IB_WC_LOC_ACCESS_ERR;
2473 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2474 return IB_WC_LOC_LEN_ERR;
2475 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2476 return IB_WC_LOC_PROT_ERR;
2477 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2478 return IB_WC_LOC_QP_OP_ERR;
2479 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2480 return IB_WC_GENERAL_ERR;
2481 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2482 return IB_WC_WR_FLUSH_ERR;
2483 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2484 return IB_WC_WR_FLUSH_ERR;
2485 default:
2486 return IB_WC_GENERAL_ERR;
2487 }
2488}
2489
2490static u8 __rc_to_ib_wc_status(u8 qstatus)
2491{
2492 switch (qstatus) {
2493 case CQ_RES_RC_STATUS_OK:
2494 return IB_WC_SUCCESS;
2495 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2496 return IB_WC_LOC_ACCESS_ERR;
2497 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2498 return IB_WC_LOC_LEN_ERR;
2499 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2500 return IB_WC_LOC_PROT_ERR;
2501 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2502 return IB_WC_LOC_QP_OP_ERR;
2503 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2504 return IB_WC_GENERAL_ERR;
2505 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2506 return IB_WC_REM_INV_REQ_ERR;
2507 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2508 return IB_WC_WR_FLUSH_ERR;
2509 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2510 return IB_WC_WR_FLUSH_ERR;
2511 default:
2512 return IB_WC_GENERAL_ERR;
2513 }
2514}
2515
2516static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2517{
2518 switch (cqe->type) {
2519 case BNXT_QPLIB_SWQE_TYPE_SEND:
2520 wc->opcode = IB_WC_SEND;
2521 break;
2522 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2523 wc->opcode = IB_WC_SEND;
2524 wc->wc_flags |= IB_WC_WITH_IMM;
2525 break;
2526 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2527 wc->opcode = IB_WC_SEND;
2528 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2529 break;
2530 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2531 wc->opcode = IB_WC_RDMA_WRITE;
2532 break;
2533 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2534 wc->opcode = IB_WC_RDMA_WRITE;
2535 wc->wc_flags |= IB_WC_WITH_IMM;
2536 break;
2537 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2538 wc->opcode = IB_WC_RDMA_READ;
2539 break;
2540 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2541 wc->opcode = IB_WC_COMP_SWAP;
2542 break;
2543 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2544 wc->opcode = IB_WC_FETCH_ADD;
2545 break;
2546 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2547 wc->opcode = IB_WC_LOCAL_INV;
2548 break;
2549 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2550 wc->opcode = IB_WC_REG_MR;
2551 break;
2552 default:
2553 wc->opcode = IB_WC_SEND;
2554 break;
2555 }
2556
2557 wc->status = __req_to_ib_wc_status(cqe->status);
2558}
2559
2560static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2561 u16 raweth_qp1_flags2)
2562{
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002563 bool is_ipv6 = false, is_ipv4 = false;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002564
2565 /* raweth_qp1_flags Bit 9-6 indicates itype */
2566 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2567 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2568 return -1;
2569
2570 if (raweth_qp1_flags2 &
2571 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2572 raweth_qp1_flags2 &
2573 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
 2574		/* raweth_qp1_flags2 bit 8 indicates the IP type: 0 - v4, 1 - v6 */
 2575		if (raweth_qp1_flags2 &
 2576		    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
 2577			is_ipv6 = true;
 2578		else
 2579			is_ipv4 = true;
 2580		return is_ipv6 ? BNXT_RE_ROCEV2_IPV6_PACKET :
 2581				 BNXT_RE_ROCEV2_IPV4_PACKET;
2581 } else {
2582 return BNXT_RE_ROCE_V1_PACKET;
2583 }
2584}
2585
2586static int bnxt_re_to_ib_nw_type(int nw_type)
2587{
2588 u8 nw_hdr_type = 0xFF;
2589
2590 switch (nw_type) {
2591 case BNXT_RE_ROCE_V1_PACKET:
2592 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2593 break;
2594 case BNXT_RE_ROCEV2_IPV4_PACKET:
2595 nw_hdr_type = RDMA_NETWORK_IPV4;
2596 break;
2597 case BNXT_RE_ROCEV2_IPV6_PACKET:
2598 nw_hdr_type = RDMA_NETWORK_IPV6;
2599 break;
2600 }
2601 return nw_hdr_type;
2602}
2603
2604static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2605 void *rq_hdr_buf)
2606{
2607 u8 *tmp_buf = NULL;
2608 struct ethhdr *eth_hdr;
2609 u16 eth_type;
2610 bool rc = false;
2611
2612 tmp_buf = (u8 *)rq_hdr_buf;
2613 /*
 2614	 * If the dest MAC is not the same as the I/F MAC, this could be
 2615	 * a loopback or multicast address; check whether it is a
 2616	 * loopback packet.
2617 */
2618 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
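		/* Skip the 4-byte internal header that precedes the
		 * Ethernet header on QP1 loopback packets (see
		 * bnxt_re_process_raw_qp_pkt_rx() below).
		 */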
2619 tmp_buf += 4;
2620 /* Check the ether type */
2621 eth_hdr = (struct ethhdr *)tmp_buf;
2622 eth_type = ntohs(eth_hdr->h_proto);
2623 switch (eth_type) {
2624 case ETH_P_IBOE:
2625 rc = true;
2626 break;
2627 case ETH_P_IP:
2628 case ETH_P_IPV6: {
2629 u32 len;
2630 struct udphdr *udp_hdr;
2631
2632 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2633 sizeof(struct ipv6hdr));
2634 tmp_buf += sizeof(struct ethhdr) + len;
2635 udp_hdr = (struct udphdr *)tmp_buf;
2636 if (ntohs(udp_hdr->dest) ==
2637 ROCE_V2_UDP_DPORT)
2638 rc = true;
2639 break;
2640 }
2641 default:
2642 break;
2643 }
2644 }
2645
2646 return rc;
2647}
2648
2649static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2650 struct bnxt_qplib_cqe *cqe)
2651{
2652 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2653 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2654 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2655 struct ib_send_wr *swr;
2656 struct ib_ud_wr udwr;
2657 struct ib_recv_wr rwr;
2658 int pkt_type = 0;
2659 u32 tbl_idx;
2660 void *rq_hdr_buf;
2661 dma_addr_t rq_hdr_buf_map;
2662 dma_addr_t shrq_hdr_buf_map;
2663 u32 offset = 0;
2664 u32 skip_bytes = 0;
2665 struct ib_sge s_sge[2];
2666 struct ib_sge r_sge[2];
2667 int rc;
2668
2669 memset(&udwr, 0, sizeof(udwr));
2670 memset(&rwr, 0, sizeof(rwr));
2671 memset(&s_sge, 0, sizeof(s_sge));
2672 memset(&r_sge, 0, sizeof(r_sge));
2673
2674 swr = &udwr.wr;
2675 tbl_idx = cqe->wr_id;
2676
2677 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2678 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2679 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2680 tbl_idx);
2681
2682 /* Shadow QP header buffer */
2683 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2684 tbl_idx);
2685 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2686
2687 /* Store this cqe */
2688 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2689 sqp_entry->qp1_qp = qp1_qp;
2690
2691 /* Find packet type from the cqe */
2692
2693 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2694 cqe->raweth_qp1_flags2);
2695 if (pkt_type < 0) {
2696 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2697 return -EINVAL;
2698 }
2699
2700 /* Adjust the offset for the user buffer and post in the rq */
2701
2702 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2703 offset = 20;
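	/* An IPv4 header (20 bytes) takes the place of the 40-byte
	 * GRH/IPv6 header, hence the 20-byte shift.
	 */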
2704
2705 /*
2706 * QP1 loopback packet has 4 bytes of internal header before
2707 * ether header. Skip these four bytes.
2708 */
2709 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2710 skip_bytes = 4;
2711
 2712	/* First send SGE. Skip the ether header. */
2713 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2714 + skip_bytes;
2715 s_sge[0].lkey = 0xFFFFFFFF;
2716 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2717 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2718
2719 /* Second Send SGE */
2720 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2721 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2722 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2723 s_sge[1].addr += 8;
2724 s_sge[1].lkey = 0xFFFFFFFF;
2725 s_sge[1].length = 256;
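	/* A MAD datagram is 256 bytes, so this covers the full QP1
	 * payload after the headers.
	 */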
2726
2727 /* First recv SGE */
2728
2729 r_sge[0].addr = shrq_hdr_buf_map;
2730 r_sge[0].lkey = 0xFFFFFFFF;
2731 r_sge[0].length = 40;
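	/* struct ib_grh is 40 bytes; the MAD layer expects it ahead of
	 * the datagram (see the comment above
	 * bnxt_re_build_qp1_shadow_qp_recv()).
	 */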
2732
2733 r_sge[1].addr = sqp_entry->sge.addr + offset;
2734 r_sge[1].lkey = sqp_entry->sge.lkey;
2735 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2736
2737 /* Create receive work request */
2738 rwr.num_sge = 2;
2739 rwr.sg_list = r_sge;
2740 rwr.wr_id = tbl_idx;
2741 rwr.next = NULL;
2742
2743 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2744 if (rc) {
2745 dev_err(rdev_to_dev(rdev),
2746 "Failed to post Rx buffers to shadow QP");
2747 return -ENOMEM;
2748 }
2749
2750 swr->num_sge = 2;
2751 swr->sg_list = s_sge;
2752 swr->wr_id = tbl_idx;
2753 swr->opcode = IB_WR_SEND;
2754 swr->next = NULL;
2755
2756 udwr.ah = &rdev->sqp_ah->ib_ah;
2757 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2758 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2759
2760 /* post data received in the send queue */
2761 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2762
2763 return 0;
2764}
2765
2766static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2767 struct bnxt_qplib_cqe *cqe)
2768{
2769 wc->opcode = IB_WC_RECV;
2770 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2771 wc->wc_flags |= IB_WC_GRH;
2772}
2773
2774static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2775 struct bnxt_qplib_cqe *cqe)
2776{
2777 wc->opcode = IB_WC_RECV;
2778 wc->status = __rc_to_ib_wc_status(cqe->status);
2779
2780 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2781 wc->wc_flags |= IB_WC_WITH_IMM;
2782 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2783 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2784 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2785 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2786 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2787}
2788
2789static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2790 struct ib_wc *wc,
2791 struct bnxt_qplib_cqe *cqe)
2792{
2793 u32 tbl_idx;
2794 struct bnxt_re_dev *rdev = qp->rdev;
2795 struct bnxt_re_qp *qp1_qp = NULL;
2796 struct bnxt_qplib_cqe *orig_cqe = NULL;
2797 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2798 int nw_type;
2799
2800 tbl_idx = cqe->wr_id;
2801
2802 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2803 qp1_qp = sqp_entry->qp1_qp;
2804 orig_cqe = &sqp_entry->cqe;
2805
2806 wc->wr_id = sqp_entry->wrid;
2807 wc->byte_len = orig_cqe->length;
2808 wc->qp = &qp1_qp->ib_qp;
2809
2810 wc->ex.imm_data = orig_cqe->immdata;
2811 wc->src_qp = orig_cqe->src_qp;
2812 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2813 wc->port_num = 1;
2814 wc->vendor_err = orig_cqe->status;
2815
2816 wc->opcode = IB_WC_RECV;
2817 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2818 wc->wc_flags |= IB_WC_GRH;
2819
2820 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2821 orig_cqe->raweth_qp1_flags2);
2822 if (nw_type >= 0) {
2823 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2824 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2825 }
2826}
2827
2828static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2829 struct bnxt_qplib_cqe *cqe)
2830{
2831 wc->opcode = IB_WC_RECV;
2832 wc->status = __rc_to_ib_wc_status(cqe->status);
2833
2834 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2835 wc->wc_flags |= IB_WC_WITH_IMM;
2836 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2837 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2838 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2839 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2840 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2841}
2842
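/*
 * Consume the SQ slot reserved for the phantom WQE (see the
 * q_full_delta adjustment in bnxt_re_modify_qp()) by posting a
 * fence-MW bind.
 */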
Eddie Wai9152e0b2017-06-14 03:26:23 -07002843static int send_phantom_wqe(struct bnxt_re_qp *qp)
2844{
2845 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2846 unsigned long flags;
2847 int rc = 0;
2848
2849 spin_lock_irqsave(&qp->sq_lock, flags);
2850
2851 rc = bnxt_re_bind_fence_mw(lib_qp);
2852 if (!rc) {
2853 lib_qp->sq.phantom_wqe_cnt++;
2854 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2855 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2856 lib_qp->id, lib_qp->sq.hwq.prod,
2857 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2858 lib_qp->sq.phantom_wqe_cnt);
2859 }
2860
2861 spin_unlock_irqrestore(&qp->sq_lock, flags);
2862 return rc;
2863}
2864
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002865int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2866{
2867 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2868 struct bnxt_re_qp *qp;
2869 struct bnxt_qplib_cqe *cqe;
2870 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002871 struct bnxt_qplib_q *sq;
2872 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002873 u32 tbl_idx;
2874 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2875 unsigned long flags;
2876
2877 spin_lock_irqsave(&cq->cq_lock, flags);
2878 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002879 num_entries = budget;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002880 if (!cq->cql) {
2881 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2882 goto exit;
2883 }
2884 cqe = &cq->cql[0];
2885 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002886 lib_qp = NULL;
2887 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2888 if (lib_qp) {
2889 sq = &lib_qp->sq;
2890 if (sq->send_phantom) {
2891 qp = container_of(lib_qp,
2892 struct bnxt_re_qp, qplib_qp);
2893 if (send_phantom_wqe(qp) == -ENOMEM)
2894 dev_err(rdev_to_dev(cq->rdev),
2895 "Phantom failed! Scheduled to send again\n");
2896 else
2897 sq->send_phantom = false;
2898 }
2899 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002900 if (ncqe < budget)
2901 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
2902 cqe + ncqe,
2903 budget - ncqe);
Eddie Wai9152e0b2017-06-14 03:26:23 -07002904
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002905 if (!ncqe)
2906 break;
2907
2908 for (i = 0; i < ncqe; i++, cqe++) {
2909 /* Transcribe each qplib_wqe back to ib_wc */
2910 memset(wc, 0, sizeof(*wc));
2911
2912 wc->wr_id = cqe->wr_id;
2913 wc->byte_len = cqe->length;
2914 qp = container_of
2915 ((struct bnxt_qplib_qp *)
2916 (unsigned long)(cqe->qp_handle),
2917 struct bnxt_re_qp, qplib_qp);
2918 if (!qp) {
2919 dev_err(rdev_to_dev(cq->rdev),
2920 "POLL CQ : bad QP handle");
2921 continue;
2922 }
2923 wc->qp = &qp->ib_qp;
2924 wc->ex.imm_data = cqe->immdata;
2925 wc->src_qp = cqe->src_qp;
2926 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2927 wc->port_num = 1;
2928 wc->vendor_err = cqe->status;
2929
2930 switch (cqe->opcode) {
2931 case CQ_BASE_CQE_TYPE_REQ:
2932 if (qp->qplib_qp.id ==
2933 qp->rdev->qp1_sqp->qplib_qp.id) {
2934 /* Handle this completion with
2935 * the stored completion
2936 */
2937 memset(wc, 0, sizeof(*wc));
2938 continue;
2939 }
2940 bnxt_re_process_req_wc(wc, cqe);
2941 break;
2942 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2943 if (!cqe->status) {
2944 int rc = 0;
2945
2946 rc = bnxt_re_process_raw_qp_pkt_rx
2947 (qp, cqe);
2948 if (!rc) {
2949 memset(wc, 0, sizeof(*wc));
2950 continue;
2951 }
2952 cqe->status = -1;
2953 }
 2954			/* Errors need not be looped back, but the
 2955			 * wr_id must be changed to the one stored
 2956			 * in the table.
 2957			 */
2958 tbl_idx = cqe->wr_id;
2959 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2960 wc->wr_id = sqp_entry->wrid;
2961 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2962 break;
2963 case CQ_BASE_CQE_TYPE_RES_RC:
2964 bnxt_re_process_res_rc_wc(wc, cqe);
2965 break;
2966 case CQ_BASE_CQE_TYPE_RES_UD:
2967 if (qp->qplib_qp.id ==
2968 qp->rdev->qp1_sqp->qplib_qp.id) {
2969 /* Handle this completion with
2970 * the stored completion
2971 */
2972 if (cqe->status) {
2973 continue;
2974 } else {
2975 bnxt_re_process_res_shadow_qp_wc
2976 (qp, wc, cqe);
2977 break;
2978 }
2979 }
2980 bnxt_re_process_res_ud_wc(wc, cqe);
2981 break;
2982 default:
2983 dev_err(rdev_to_dev(cq->rdev),
2984 "POLL CQ : type 0x%x not handled",
2985 cqe->opcode);
2986 continue;
2987 }
2988 wc++;
2989 budget--;
2990 }
2991 }
2992exit:
2993 spin_unlock_irqrestore(&cq->cq_lock, flags);
2994 return num_entries - budget;
2995}
2996
2997int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2998 enum ib_cq_notify_flags ib_cqn_flags)
2999{
3000 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3001 int type = 0;
3002
3003 /* Trigger on the very next completion */
3004 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3005 type = DBR_DBR_TYPE_CQ_ARMALL;
3006 /* Trigger on the next solicited completion */
3007 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3008 type = DBR_DBR_TYPE_CQ_ARMSE;
3009
Selvin Xavier499e4562017-06-29 12:28:18 -07003010 /* Poll to see if there are missed events */
3011 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3012 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
3013 return 1;
3014
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003015 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3016
3017 return 0;
3018}
3019
3020/* Memory Regions */
3021struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3022{
3023 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3024 struct bnxt_re_dev *rdev = pd->rdev;
3025 struct bnxt_re_mr *mr;
3026 u64 pbl = 0;
3027 int rc;
3028
3029 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3030 if (!mr)
3031 return ERR_PTR(-ENOMEM);
3032
3033 mr->rdev = rdev;
3034 mr->qplib_mr.pd = &pd->qplib_pd;
3035 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3036 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3037
3038 /* Allocate and register 0 as the address */
3039 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3040 if (rc)
3041 goto fail;
3042
3043 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
 3044	mr->qplib_mr.total_size = -1; /* Infinite length */
3045 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3046 if (rc)
3047 goto fail_mr;
3048
3049 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3050 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3051 IB_ACCESS_REMOTE_ATOMIC))
3052 mr->ib_mr.rkey = mr->ib_mr.lkey;
3053 atomic_inc(&rdev->mr_count);
3054
3055 return &mr->ib_mr;
3056
3057fail_mr:
3058 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3059fail:
3060 kfree(mr);
3061 return ERR_PTR(rc);
3062}
3063
3064int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3065{
3066 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3067 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003068 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003069
Selvin Xavier1c980b02017-05-22 03:15:34 -07003070 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3071 if (rc) {
3072 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3073 return rc;
3074 }
3075
Selvin Xavier19935192017-08-31 09:27:34 +05303076 if (mr->pages) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003077 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3078 &mr->qplib_frpl);
3079 kfree(mr->pages);
3080 mr->npages = 0;
3081 mr->pages = NULL;
3082 }
Doug Ledford374cb862017-04-25 14:00:59 -04003083 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003084 ib_umem_release(mr->ib_umem);
3085
3086 kfree(mr);
3087 atomic_dec(&rdev->mr_count);
3088 return rc;
3089}
3090
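/*
 * Callback for ib_sg_to_pages(): collects each page-aligned DMA address
 * into mr->pages for use by the subsequent REG_MR WQE
 * (bnxt_re_build_reg_wqe()).
 */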
3091static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3092{
3093 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3094
3095 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3096 return -ENOMEM;
3097
3098 mr->pages[mr->npages++] = addr;
3099 return 0;
3100}
3101
3102int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3103 unsigned int *sg_offset)
3104{
3105 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3106
3107 mr->npages = 0;
3108 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3109}
3110
3111struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3112 u32 max_num_sg)
3113{
3114 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3115 struct bnxt_re_dev *rdev = pd->rdev;
3116 struct bnxt_re_mr *mr = NULL;
3117 int rc;
3118
3119 if (type != IB_MR_TYPE_MEM_REG) {
3120 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3121 return ERR_PTR(-EINVAL);
3122 }
3123 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3124 return ERR_PTR(-EINVAL);
3125
3126 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3127 if (!mr)
3128 return ERR_PTR(-ENOMEM);
3129
3130 mr->rdev = rdev;
3131 mr->qplib_mr.pd = &pd->qplib_pd;
3132 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3133 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3134
3135 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3136 if (rc)
3137 goto fail;
3138
3139 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3140 mr->ib_mr.rkey = mr->ib_mr.lkey;
3141
3142 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3143 if (!mr->pages) {
3144 rc = -ENOMEM;
3145 goto fail;
3146 }
3147 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3148 &mr->qplib_frpl, max_num_sg);
3149 if (rc) {
3150 dev_err(rdev_to_dev(rdev),
3151 "Failed to allocate HW FR page list");
3152 goto fail_mr;
3153 }
3154
3155 atomic_inc(&rdev->mr_count);
3156 return &mr->ib_mr;
3157
3158fail_mr:
3159 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3160fail:
3161 kfree(mr->pages);
3162 kfree(mr);
3163 return ERR_PTR(rc);
3164}
3165
Eddie Wai9152e0b2017-06-14 03:26:23 -07003166struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3167 struct ib_udata *udata)
3168{
3169 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3170 struct bnxt_re_dev *rdev = pd->rdev;
3171 struct bnxt_re_mw *mw;
3172 int rc;
3173
3174 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3175 if (!mw)
3176 return ERR_PTR(-ENOMEM);
3177 mw->rdev = rdev;
3178 mw->qplib_mw.pd = &pd->qplib_pd;
3179
3180 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3181 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3182 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3183 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3184 if (rc) {
3185 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3186 goto fail;
3187 }
3188 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3189
3190 atomic_inc(&rdev->mw_count);
3191 return &mw->ib_mw;
3192
3193fail:
3194 kfree(mw);
3195 return ERR_PTR(rc);
3196}
3197
3198int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3199{
3200 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3201 struct bnxt_re_dev *rdev = mw->rdev;
3202 int rc;
3203
3204 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3205 if (rc) {
3206 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3207 return rc;
3208 }
3209
3210 kfree(mw);
3211 atomic_dec(&rdev->mw_count);
3212 return rc;
3213}
3214
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003215/* uverbs */
3216struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3217 u64 virt_addr, int mr_access_flags,
3218 struct ib_udata *udata)
3219{
3220 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3221 struct bnxt_re_dev *rdev = pd->rdev;
3222 struct bnxt_re_mr *mr;
3223 struct ib_umem *umem;
3224 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003225 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003226 struct scatterlist *sg;
3227 int entry;
3228
Selvin Xavier58d4a672017-06-29 12:28:12 -07003229 if (length > BNXT_RE_MAX_MR_SIZE) {
3230 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3231 length, BNXT_RE_MAX_MR_SIZE);
3232 return ERR_PTR(-ENOMEM);
3233 }
3234
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003235 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3236 if (!mr)
3237 return ERR_PTR(-ENOMEM);
3238
3239 mr->rdev = rdev;
3240 mr->qplib_mr.pd = &pd->qplib_pd;
3241 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3242 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3243
3244 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3245 mr_access_flags, 0);
3246 if (IS_ERR(umem)) {
3247 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3248 rc = -EFAULT;
3249 goto free_mr;
3250 }
3251 mr->ib_umem = umem;
3252
3253 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3254 if (rc) {
3255 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3256 goto release_umem;
3257 }
3258 /* The fixed portion of the rkey is the same as the lkey */
3259 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3260
3261 mr->qplib_mr.va = virt_addr;
3262 umem_pgs = ib_umem_page_count(umem);
3263 if (!umem_pgs) {
3264 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3265 rc = -EINVAL;
3266 goto free_mrw;
3267 }
3268 mr->qplib_mr.total_size = length;
3269
 3270	pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3271 if (!pbl_tbl) {
3272 rc = -EINVAL;
3273 goto free_mrw;
3274 }
3275 pbl_tbl_orig = pbl_tbl;
3276
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003277 if (umem->hugetlb) {
3278 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3279 rc = -EFAULT;
3280 goto fail;
3281 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003282
3283 if (umem->page_shift != PAGE_SHIFT) {
3284 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003285 rc = -EFAULT;
3286 goto fail;
3287 }
3288 /* Map umem buf ptrs to the PBL */
3289 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003290 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003291 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003292 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003293 }
3294 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3295 umem_pgs, false);
3296 if (rc) {
3297 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3298 goto fail;
3299 }
3300
3301 kfree(pbl_tbl_orig);
3302
3303 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3304 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3305 atomic_inc(&rdev->mr_count);
3306
3307 return &mr->ib_mr;
3308fail:
3309 kfree(pbl_tbl_orig);
3310free_mrw:
3311 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3312release_umem:
3313 ib_umem_release(umem);
3314free_mr:
3315 kfree(mr);
3316 return ERR_PTR(rc);
3317}
3318
3319struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3320 struct ib_udata *udata)
3321{
3322 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3323 struct bnxt_re_uctx_resp resp;
3324 struct bnxt_re_ucontext *uctx;
3325 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3326 int rc;
3327
3328 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3329 ibdev->uverbs_abi_ver);
3330
3331 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3332 dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3333 BNXT_RE_ABI_VERSION);
3334 return ERR_PTR(-EPERM);
3335 }
3336
3337 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3338 if (!uctx)
3339 return ERR_PTR(-ENOMEM);
3340
3341 uctx->rdev = rdev;
3342
3343 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3344 if (!uctx->shpg) {
3345 rc = -ENOMEM;
3346 goto fail;
3347 }
3348 spin_lock_init(&uctx->sh_lock);
 3349	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3350 resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
3351 resp.max_qp = rdev->qplib_ctx.qpc_count;
3352 resp.pg_size = PAGE_SIZE;
3353 resp.cqe_sz = sizeof(struct cq_base);
3354 resp.max_cqd = dev_attr->max_cq_wqes;
3355 resp.rsvd = 0;
3356
3357 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3358 if (rc) {
3359 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3360 rc = -EFAULT;
3361 goto cfail;
3362 }
3363
3364 return &uctx->ib_uctx;
3365cfail:
3366 free_page((unsigned long)uctx->shpg);
3367 uctx->shpg = NULL;
3368fail:
3369 kfree(uctx);
3370 return ERR_PTR(rc);
3371}
3372
3373int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3374{
3375 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3376 struct bnxt_re_ucontext,
3377 ib_uctx);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003378
3379 struct bnxt_re_dev *rdev = uctx->rdev;
3380 int rc = 0;
3381
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003382 if (uctx->shpg)
3383 free_page((unsigned long)uctx->shpg);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003384
3385 if (uctx->dpi.dbr) {
 3386		/* The DPI was allocated along with the application's
 3387		 * first PD; free it here and mark the context dpi as NULL.
 3388		 */
3389 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3390 &rdev->qplib_res.dpi_tbl,
3391 &uctx->dpi);
3392 if (rc)
Colin Ian King24bb4d82017-07-14 08:30:10 +01003393 dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
 3394			/* Don't fail, continue */
3395 uctx->dpi.dbr = NULL;
3396 }
3397
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003398 kfree(uctx);
3399 return 0;
3400}
3401
3402/* Helper function to mmap the virtual memory from user app */
3403int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3404{
3405 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3406 struct bnxt_re_ucontext,
3407 ib_uctx);
3408 struct bnxt_re_dev *rdev = uctx->rdev;
3409 u64 pfn;
3410
3411 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3412 return -EINVAL;
3413
3414 if (vma->vm_pgoff) {
3415 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3416 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3417 PAGE_SIZE, vma->vm_page_prot)) {
3418 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3419 return -EAGAIN;
3420 }
3421 } else {
3422 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3423 if (remap_pfn_range(vma, vma->vm_start,
3424 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3425 dev_err(rdev_to_dev(rdev),
3426 "Failed to map shared page");
3427 return -EAGAIN;
3428 }
3429 }
3430
3431 return 0;
3432}