 1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
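/*
 * Helpers that translate between the IB verbs access flags (IB_ACCESS_*)
 * and the qplib encoding (BNXT_QPLIB_ACCESS_*) passed to the firmware.
 * For example, __from_ib_access_flags(IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ) returns BNXT_QPLIB_ACCESS_LOCAL_WRITE |
 * BNXT_QPLIB_ACCESS_REMOTE_READ.
 */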
 64static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
 83}
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
 104}
105
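/*
 * Copy an IB scatter/gather list into the qplib SGE format used when
 * posting work requests; returns the total payload length in bytes.
 */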
 106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
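/*
 * Report device attributes. Most limits come straight from the cached
 * bnxt_qplib_dev_attr; the system image GUID is derived from the netdev
 * MAC address and the vendor/part/hw IDs from the parent PCI device.
 */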
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
148 ib_attr->max_mr_size = ~0ull;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
150 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
151 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
152
153 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
154 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
155 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
156 ib_attr->max_qp = dev_attr->max_qp;
157 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
158 ib_attr->device_cap_flags =
159 IB_DEVICE_CURR_QP_STATE_MOD
160 | IB_DEVICE_RC_RNR_NAK_GEN
161 | IB_DEVICE_SHUTDOWN_PORT
162 | IB_DEVICE_SYS_IMAGE_GUID
163 | IB_DEVICE_LOCAL_DMA_LKEY
164 | IB_DEVICE_RESIZE_MAX_WR
165 | IB_DEVICE_PORT_ACTIVE_EVENT
166 | IB_DEVICE_N_NOTIFY_CQ
167 | IB_DEVICE_MEM_WINDOW
168 | IB_DEVICE_MEM_WINDOW_TYPE_2B
169 | IB_DEVICE_MEM_MGT_EXTENSIONS;
170 ib_attr->max_sge = dev_attr->max_qp_sges;
171 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
172 ib_attr->max_cq = dev_attr->max_cq;
173 ib_attr->max_cqe = dev_attr->max_cq_wqes;
174 ib_attr->max_mr = dev_attr->max_mr;
175 ib_attr->max_pd = dev_attr->max_pd;
176 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
177 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
178 ib_attr->atomic_cap = IB_ATOMIC_HCA;
179 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
194 ib_attr->max_fmr = dev_attr->max_fmr;
195 ib_attr->max_map_per_fmr = 1; /* ? */
196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
204 ib_attr->local_ca_ack_delay = 0;
205 return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214 /* Modify the GUID requires the modification of the GID table */
215 /* GUID should be made as READ-ONLY */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
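/*
 * Map the netdev's ethtool link speed to an approximate IB speed/width
 * pair (e.g. 10G -> QDR x1, 25G -> EDR x1, 40G -> QDR x4). Unknown
 * speeds default to SDR x1; note that SPEED_50000 currently falls
 * through without updating either value.
 */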
226static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
227{
228 struct ethtool_link_ksettings lksettings;
229 u32 espeed;
230
231 if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
232 memset(&lksettings, 0, sizeof(lksettings));
233 rtnl_lock();
234 netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
235 rtnl_unlock();
236 espeed = lksettings.base.speed;
237 } else {
238 espeed = SPEED_UNKNOWN;
239 }
240 switch (espeed) {
241 case SPEED_1000:
242 *speed = IB_SPEED_SDR;
243 *width = IB_WIDTH_1X;
244 break;
245 case SPEED_10000:
246 *speed = IB_SPEED_QDR;
247 *width = IB_WIDTH_1X;
248 break;
249 case SPEED_20000:
250 *speed = IB_SPEED_DDR;
251 *width = IB_WIDTH_4X;
252 break;
253 case SPEED_25000:
254 *speed = IB_SPEED_EDR;
255 *width = IB_WIDTH_1X;
256 break;
257 case SPEED_40000:
258 *speed = IB_SPEED_QDR;
259 *width = IB_WIDTH_4X;
260 break;
261 case SPEED_50000:
262 break;
263 default:
264 *speed = IB_SPEED_SDR;
265 *width = IB_WIDTH_1X;
266 break;
267 }
268}
269
270/* Port */
271int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
272 struct ib_port_attr *port_attr)
273{
274 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
275 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
276
277 memset(port_attr, 0, sizeof(*port_attr));
278
279 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
280 port_attr->state = IB_PORT_ACTIVE;
281 port_attr->phys_state = 5;
282 } else {
283 port_attr->state = IB_PORT_DOWN;
284 port_attr->phys_state = 3;
285 }
286 port_attr->max_mtu = IB_MTU_4096;
287 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
288 port_attr->gid_tbl_len = dev_attr->max_sgid;
289 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
290 IB_PORT_DEVICE_MGMT_SUP |
291 IB_PORT_VENDOR_CLASS_SUP |
292 IB_PORT_IP_BASED_GIDS;
293
294 /* Max MSG size set to 2G for now */
295 port_attr->max_msg_sz = 0x80000000;
296 port_attr->bad_pkey_cntr = 0;
297 port_attr->qkey_viol_cntr = 0;
298 port_attr->pkey_tbl_len = dev_attr->max_pkey;
299 port_attr->lid = 0;
300 port_attr->sm_lid = 0;
301 port_attr->lmc = 0;
302 port_attr->max_vl_num = 4;
303 port_attr->sm_sl = 0;
304 port_attr->subnet_timeout = 0;
305 port_attr->init_type_reply = 0;
 306 /* Query the link speed via the underlying netdev's ethtool hooks
 307 * (which take rtnl_lock) only while the device is registered with
 308 * the IB stack, to avoid a race in the NETDEV_UNREG path.
 309 */
310 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
311 __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
312 &port_attr->active_width);
313 return 0;
314}
315
316int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
317 int port_modify_mask,
318 struct ib_port_modify *port_modify)
319{
320 switch (port_modify_mask) {
321 case IB_PORT_SHUTDOWN:
322 break;
323 case IB_PORT_INIT_TYPE:
324 break;
325 case IB_PORT_RESET_QKEY_CNTR:
326 break;
327 default:
328 break;
329 }
330 return 0;
331}
332
333int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
334 struct ib_port_immutable *immutable)
335{
336 struct ib_port_attr port_attr;
337
338 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
339 return -EINVAL;
340
341 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
342 immutable->gid_tbl_len = port_attr.gid_tbl_len;
343 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
344 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
345 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
346 return 0;
347}
348
349int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
350 u16 index, u16 *pkey)
351{
352 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
353
354 /* Ignore port_num */
355
356 memset(pkey, 0, sizeof(*pkey));
357 return bnxt_qplib_get_pkey(&rdev->qplib_res,
358 &rdev->qplib_res.pkey_tbl, index, pkey);
359}
360
361int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
362 int index, union ib_gid *gid)
363{
364 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
365 int rc = 0;
366
367 /* Ignore port_num */
368 memset(gid, 0, sizeof(*gid));
369 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
370 &rdev->qplib_res.sgid_tbl, index,
371 (struct bnxt_qplib_gid *)gid);
372 return rc;
373}
374
375int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
376 unsigned int index, void **context)
377{
378 int rc = 0;
379 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
380 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
381 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
382
383 /* Delete the entry from the hardware */
384 ctx = *context;
385 if (!ctx)
386 return -EINVAL;
387
388 if (sgid_tbl && sgid_tbl->active) {
389 if (ctx->idx >= sgid_tbl->max)
390 return -EINVAL;
391 ctx->refcnt--;
392 if (!ctx->refcnt) {
393 rc = bnxt_qplib_del_sgid
394 (sgid_tbl,
395 &sgid_tbl->tbl[ctx->idx], true);
396 if (rc)
397 dev_err(rdev_to_dev(rdev),
398 "Failed to remove GID: %#x", rc);
399 ctx_tbl = sgid_tbl->ctx;
400 ctx_tbl[ctx->idx] = NULL;
401 kfree(ctx);
402 }
403 } else {
404 return -EINVAL;
405 }
406 return rc;
407}
408
409int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
410 unsigned int index, const union ib_gid *gid,
411 const struct ib_gid_attr *attr, void **context)
412{
413 int rc;
414 u32 tbl_idx = 0;
415 u16 vlan_id = 0xFFFF;
416 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
417 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
418 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
419
420 if ((attr->ndev) && is_vlan_dev(attr->ndev))
421 vlan_id = vlan_dev_vlan_id(attr->ndev);
422
423 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
424 rdev->qplib_res.netdev->dev_addr,
425 vlan_id, true, &tbl_idx);
426 if (rc == -EALREADY) {
427 ctx_tbl = sgid_tbl->ctx;
428 ctx_tbl[tbl_idx]->refcnt++;
429 *context = ctx_tbl[tbl_idx];
430 return 0;
431 }
432
433 if (rc < 0) {
434 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
435 return rc;
436 }
437
438 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
439 if (!ctx)
440 return -ENOMEM;
441 ctx_tbl = sgid_tbl->ctx;
442 ctx->idx = tbl_idx;
443 ctx->refcnt = 1;
444 ctx_tbl[tbl_idx] = ctx;
445
446 return rc;
447}
448
449enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
450 u8 port_num)
451{
452 return IB_LINK_LAYER_ETHERNET;
453}
454
 455#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
456
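/*
 * Fence resources: each kernel PD owns a small DMA-mapped buffer, an MR
 * registered over it and a type-1 memory window. The bind-MW WQE built
 * here carries the UC_FENCE flag and is posted by bnxt_re_bind_fence_mw()
 * to fence the send queue (kernel consumers only, see bnxt_re_alloc_pd()).
 */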
457static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
458{
459 struct bnxt_re_fence_data *fence = &pd->fence;
460 struct ib_mr *ib_mr = &fence->mr->ib_mr;
461 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
462
463 memset(wqe, 0, sizeof(*wqe));
464 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
465 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
466 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
467 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
468 wqe->bind.zero_based = false;
469 wqe->bind.parent_l_key = ib_mr->lkey;
470 wqe->bind.va = (u64)(unsigned long)fence->va;
471 wqe->bind.length = fence->size;
472 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
473 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
474
475 /* Save the initial rkey in fence structure for now;
476 * wqe->bind.r_key will be set at (re)bind time.
477 */
478 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
479}
480
481static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
482{
483 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
484 qplib_qp);
485 struct ib_pd *ib_pd = qp->ib_qp.pd;
486 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
487 struct bnxt_re_fence_data *fence = &pd->fence;
488 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
489 struct bnxt_qplib_swqe wqe;
490 int rc;
491
492 memcpy(&wqe, fence_wqe, sizeof(wqe));
493 wqe.bind.r_key = fence->bind_rkey;
494 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
495
496 dev_dbg(rdev_to_dev(qp->rdev),
497 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
498 wqe.bind.r_key, qp->qplib_qp.id, pd);
499 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
500 if (rc) {
501 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
502 return rc;
503 }
504 bnxt_qplib_post_send_db(&qp->qplib_qp);
505
506 return rc;
507}
508
509static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
510{
511 struct bnxt_re_fence_data *fence = &pd->fence;
512 struct bnxt_re_dev *rdev = pd->rdev;
513 struct device *dev = &rdev->en_dev->pdev->dev;
514 struct bnxt_re_mr *mr = fence->mr;
515
516 if (fence->mw) {
517 bnxt_re_dealloc_mw(fence->mw);
518 fence->mw = NULL;
519 }
520 if (mr) {
521 if (mr->ib_mr.rkey)
522 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
523 true);
524 if (mr->ib_mr.lkey)
525 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
526 kfree(mr);
527 fence->mr = NULL;
528 }
529 if (fence->dma_addr) {
530 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
531 DMA_BIDIRECTIONAL);
532 fence->dma_addr = 0;
533 }
534}
535
536static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
537{
538 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
539 struct bnxt_re_fence_data *fence = &pd->fence;
540 struct bnxt_re_dev *rdev = pd->rdev;
541 struct device *dev = &rdev->en_dev->pdev->dev;
542 struct bnxt_re_mr *mr = NULL;
543 dma_addr_t dma_addr = 0;
544 struct ib_mw *mw;
545 u64 pbl_tbl;
546 int rc;
547
548 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
549 DMA_BIDIRECTIONAL);
550 rc = dma_mapping_error(dev, dma_addr);
551 if (rc) {
552 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
553 rc = -EIO;
554 fence->dma_addr = 0;
555 goto fail;
556 }
557 fence->dma_addr = dma_addr;
558
559 /* Allocate a MR */
560 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
561 if (!mr) {
562 rc = -ENOMEM;
563 goto fail;
564 }
565 fence->mr = mr;
566 mr->rdev = rdev;
567 mr->qplib_mr.pd = &pd->qplib_pd;
568 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
569 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
570 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
571 if (rc) {
572 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
573 goto fail;
574 }
575
576 /* Register MR */
577 mr->ib_mr.lkey = mr->qplib_mr.lkey;
578 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
579 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
580 pbl_tbl = dma_addr;
581 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
582 BNXT_RE_FENCE_PBL_SIZE, false);
583 if (rc) {
584 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
585 goto fail;
586 }
587 mr->ib_mr.rkey = mr->qplib_mr.rkey;
588
589 /* Create a fence MW only for kernel consumers */
590 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
591 if (!mw) {
592 dev_err(rdev_to_dev(rdev),
593 "Failed to create fence-MW for PD: %p\n", pd);
594 rc = -EINVAL;
595 goto fail;
596 }
597 fence->mw = mw;
598
599 bnxt_re_create_fence_wqe(pd);
600 return 0;
601
602fail:
603 bnxt_re_destroy_fence_mr(pd);
604 return rc;
605}
606
 607/* Protection Domains */
608int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
609{
610 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
611 struct bnxt_re_dev *rdev = pd->rdev;
612 int rc;
613
 614 bnxt_re_destroy_fence_mr(pd);
 615 if (ib_pd->uobject && pd->dpi.dbr) {
616 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
617 struct bnxt_re_ucontext *ucntx;
618
619 /* Free DPI only if this is the first PD allocated by the
620 * application and mark the context dpi as NULL
621 */
622 ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
623
624 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
625 &rdev->qplib_res.dpi_tbl,
626 &pd->dpi);
627 if (rc)
628 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
 629 /* Don't fail, continue */
630 ucntx->dpi = NULL;
631 }
632
633 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
634 &rdev->qplib_res.pd_tbl,
635 &pd->qplib_pd);
636 if (rc) {
637 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
638 return rc;
639 }
640
641 kfree(pd);
642 return 0;
643}
644
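/*
 * PD allocation. For user contexts the first PD also allocates the
 * context's doorbell page (DPI), and the DPI/DBR details are returned to
 * user space in bnxt_re_pd_resp; kernel PDs additionally get the fence
 * MR/MW resources created above.
 */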
645struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
646 struct ib_ucontext *ucontext,
647 struct ib_udata *udata)
648{
649 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
650 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
651 struct bnxt_re_ucontext,
652 ib_uctx);
653 struct bnxt_re_pd *pd;
654 int rc;
655
656 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
657 if (!pd)
658 return ERR_PTR(-ENOMEM);
659
660 pd->rdev = rdev;
661 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
662 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
663 rc = -ENOMEM;
664 goto fail;
665 }
666
667 if (udata) {
668 struct bnxt_re_pd_resp resp;
669
670 if (!ucntx->dpi) {
 671 /* Allocate the DPI in alloc_pd so that ibv_devinfo and
 672 * similar applications do not fail when DPIs are
 673 * depleted.
674 */
675 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
676 &pd->dpi, ucntx)) {
677 rc = -ENOMEM;
678 goto dbfail;
679 }
680 ucntx->dpi = &pd->dpi;
681 }
682
683 resp.pdid = pd->qplib_pd.id;
684 /* Still allow mapping this DBR to the new user PD. */
685 resp.dpi = ucntx->dpi->dpi;
686 resp.dbr = (u64)ucntx->dpi->umdbr;
687
688 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
689 if (rc) {
690 dev_err(rdev_to_dev(rdev),
691 "Failed to copy user response\n");
692 goto dbfail;
693 }
694 }
695
 696 if (!udata)
697 if (bnxt_re_create_fence_mr(pd))
698 dev_warn(rdev_to_dev(rdev),
699 "Failed to create Fence-MR\n");
 700 return &pd->ib_pd;
701dbfail:
702 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
703 &pd->qplib_pd);
704fail:
705 kfree(pd);
706 return ERR_PTR(rc);
707}
708
709/* Address Handles */
710int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
711{
712 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
713 struct bnxt_re_dev *rdev = ah->rdev;
714 int rc;
715
716 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
717 if (rc) {
718 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
719 return rc;
720 }
721 kfree(ah);
722 return 0;
723}
724
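/*
 * Address handle creation. A GRH is mandatory; for user-mode AHs whose
 * destination is neither multicast nor link-local, the source GID is
 * looked up to pick the RoCE v1/v2 header type and to resolve the
 * destination MAC and VLAN before the AH is handed to the firmware.
 */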
725struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
 726 struct rdma_ah_attr *ah_attr,
 727 struct ib_udata *udata)
728{
729 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
730 struct bnxt_re_dev *rdev = pd->rdev;
731 struct bnxt_re_ah *ah;
 732 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
 733 int rc;
734 u16 vlan_tag;
735 u8 nw_type;
736
737 struct ib_gid_attr sgid_attr;
738
 739 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
 740 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
741 return ERR_PTR(-EINVAL);
742 }
743 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
744 if (!ah)
745 return ERR_PTR(-ENOMEM);
746
747 ah->rdev = rdev;
748 ah->qplib_ah.pd = &pd->qplib_pd;
749
750 /* Supply the configuration for the HW */
 751 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
 752 sizeof(union ib_gid));
753 /*
 754 * If RoCE V2 is enabled, the stack will have two entries for
 755 * each GID entry. Avoid this duplicate entry in HW by dividing
 756 * the GID index by 2 for RoCE V2.
757 */
 758 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
759 ah->qplib_ah.host_sgid_index = grh->sgid_index;
760 ah->qplib_ah.traffic_class = grh->traffic_class;
761 ah->qplib_ah.flow_label = grh->flow_label;
762 ah->qplib_ah.hop_limit = grh->hop_limit;
763 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
 764 if (ib_pd->uobject &&
765 !rdma_is_multicast_addr((struct in6_addr *)
 766 grh->dgid.raw) &&
 767 !rdma_link_local_addr((struct in6_addr *)
 768 grh->dgid.raw)) {
 769 union ib_gid sgid;
770
771 rc = ib_get_cached_gid(&rdev->ibdev, 1,
 772 grh->sgid_index, &sgid,
 773 &sgid_attr);
774 if (rc) {
775 dev_err(rdev_to_dev(rdev),
776 "Failed to query gid at index %d",
 777 grh->sgid_index);
 778 goto fail;
779 }
780 if (sgid_attr.ndev) {
781 if (is_vlan_dev(sgid_attr.ndev))
782 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
783 dev_put(sgid_attr.ndev);
784 }
785 /* Get network header type for this GID */
786 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
787 switch (nw_type) {
788 case RDMA_NETWORK_IPV4:
789 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
790 break;
791 case RDMA_NETWORK_IPV6:
792 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
793 break;
794 default:
795 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
796 break;
797 }
 798 rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
 799 ah_attr->roce.dmac, &vlan_tag,
 800 &sgid_attr.ndev->ifindex,
801 NULL);
802 if (rc) {
803 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
804 goto fail;
805 }
806 }
807
 808 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
 809 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
810 if (rc) {
811 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
812 goto fail;
813 }
814
815 /* Write AVID to shared page. */
816 if (ib_pd->uobject) {
817 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
818 struct bnxt_re_ucontext *uctx;
819 unsigned long flag;
820 u32 *wrptr;
821
822 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
823 spin_lock_irqsave(&uctx->sh_lock, flag);
824 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
825 *wrptr = ah->qplib_ah.id;
826 wmb(); /* make sure cache is updated. */
827 spin_unlock_irqrestore(&uctx->sh_lock, flag);
828 }
829
830 return &ah->ib_ah;
831
832fail:
833 kfree(ah);
834 return ERR_PTR(rc);
835}
836
 837int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
 838{
839 return 0;
840}
841
 842int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
 843{
844 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
845
 846 ah_attr->type = ib_ah->type;
 847 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
 848 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
 849 rdma_ah_set_grh(ah_attr, NULL, 0,
850 ah->qplib_ah.host_sgid_index,
851 0, ah->qplib_ah.traffic_class);
852 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
853 rdma_ah_set_port_num(ah_attr, 1);
854 rdma_ah_set_static_rate(ah_attr, 0);
 855 return 0;
856}
857
858/* Queue Pairs */
859int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
860{
861 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
862 struct bnxt_re_dev *rdev = qp->rdev;
863 int rc;
864
865 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
866 if (rc) {
867 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
868 return rc;
869 }
870 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
871 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
872 &rdev->sqp_ah->qplib_ah);
873 if (rc) {
874 dev_err(rdev_to_dev(rdev),
875 "Failed to destroy HW AH for shadow QP");
876 return rc;
877 }
878
879 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
880 &rdev->qp1_sqp->qplib_qp);
881 if (rc) {
882 dev_err(rdev_to_dev(rdev),
883 "Failed to destroy Shadow QP");
884 return rc;
885 }
886 mutex_lock(&rdev->qp_lock);
887 list_del(&rdev->qp1_sqp->list);
888 atomic_dec(&rdev->qp_count);
889 mutex_unlock(&rdev->qp_lock);
890
891 kfree(rdev->sqp_ah);
892 kfree(rdev->qp1_sqp);
893 }
894
 895 if (!IS_ERR_OR_NULL(qp->rumem))
 896 ib_umem_release(qp->rumem);
 897 if (!IS_ERR_OR_NULL(qp->sumem))
 898 ib_umem_release(qp->sumem);
899
900 mutex_lock(&rdev->qp_lock);
901 list_del(&qp->list);
902 atomic_dec(&rdev->qp_count);
903 mutex_unlock(&rdev->qp_lock);
904 kfree(qp);
905 return 0;
906}
907
908static u8 __from_ib_qp_type(enum ib_qp_type type)
909{
910 switch (type) {
911 case IB_QPT_GSI:
912 return CMDQ_CREATE_QP1_TYPE_GSI;
913 case IB_QPT_RC:
914 return CMDQ_CREATE_QP_TYPE_RC;
915 case IB_QPT_UD:
916 return CMDQ_CREATE_QP_TYPE_UD;
917 default:
918 return IB_QPT_MAX;
919 }
920}
921
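/*
 * Map the user-space SQ and RQ buffers described by bnxt_re_qp_req into
 * the qplib queues via ib_umem_get(); RC QPs also reserve room in the
 * same mapping for the PSN search entries that follow the SQ entries.
 */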
922static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
923 struct bnxt_re_qp *qp, struct ib_udata *udata)
924{
925 struct bnxt_re_qp_req ureq;
926 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
927 struct ib_umem *umem;
928 int bytes = 0;
929 struct ib_ucontext *context = pd->ib_pd.uobject->context;
930 struct bnxt_re_ucontext *cntx = container_of(context,
931 struct bnxt_re_ucontext,
932 ib_uctx);
933 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
934 return -EFAULT;
935
936 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
937 /* Consider mapping PSN search memory only for RC QPs. */
938 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
939 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
940 bytes = PAGE_ALIGN(bytes);
941 umem = ib_umem_get(context, ureq.qpsva, bytes,
942 IB_ACCESS_LOCAL_WRITE, 1);
943 if (IS_ERR(umem))
944 return PTR_ERR(umem);
945
946 qp->sumem = umem;
947 qplib_qp->sq.sglist = umem->sg_head.sgl;
948 qplib_qp->sq.nmap = umem->nmap;
949 qplib_qp->qp_handle = ureq.qp_handle;
950
951 if (!qp->qplib_qp.srq) {
952 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
953 bytes = PAGE_ALIGN(bytes);
954 umem = ib_umem_get(context, ureq.qprva, bytes,
955 IB_ACCESS_LOCAL_WRITE, 1);
956 if (IS_ERR(umem))
957 goto rqfail;
958 qp->rumem = umem;
959 qplib_qp->rq.sglist = umem->sg_head.sgl;
960 qplib_qp->rq.nmap = umem->nmap;
961 }
962
963 qplib_qp->dpi = cntx->dpi;
964 return 0;
965rqfail:
966 ib_umem_release(qp->sumem);
967 qp->sumem = NULL;
968 qplib_qp->sq.sglist = NULL;
969 qplib_qp->sq.nmap = 0;
970
971 return PTR_ERR(umem);
972}
973
974static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
975 (struct bnxt_re_pd *pd,
976 struct bnxt_qplib_res *qp1_res,
977 struct bnxt_qplib_qp *qp1_qp)
978{
979 struct bnxt_re_dev *rdev = pd->rdev;
980 struct bnxt_re_ah *ah;
981 union ib_gid sgid;
982 int rc;
983
984 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
985 if (!ah)
986 return NULL;
987
988 memset(ah, 0, sizeof(*ah));
989 ah->rdev = rdev;
990 ah->qplib_ah.pd = &pd->qplib_pd;
991
992 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
993 if (rc)
994 goto fail;
995
996 /* supply the dgid data same as sgid */
997 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
998 sizeof(union ib_gid));
999 ah->qplib_ah.sgid_index = 0;
1000
1001 ah->qplib_ah.traffic_class = 0;
1002 ah->qplib_ah.flow_label = 0;
1003 ah->qplib_ah.hop_limit = 1;
1004 ah->qplib_ah.sl = 0;
1005 /* Have DMAC same as SMAC */
1006 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1007
1008 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
1009 if (rc) {
1010 dev_err(rdev_to_dev(rdev),
1011 "Failed to allocate HW AH for Shadow QP");
1012 goto fail;
1013 }
1014
1015 return ah;
1016
1017fail:
1018 kfree(ah);
1019 return NULL;
1020}
1021
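/*
 * The shadow QP is a kernel-owned UD QP created alongside the GSI QP
 * (QP1): it shares QP1's CQs, sizes its SQ to match QP1's RQ depth and
 * is used by the driver to handle QP1 traffic (see bnxt_re_create_qp()).
 */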
1022static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1023 (struct bnxt_re_pd *pd,
1024 struct bnxt_qplib_res *qp1_res,
1025 struct bnxt_qplib_qp *qp1_qp)
1026{
1027 struct bnxt_re_dev *rdev = pd->rdev;
1028 struct bnxt_re_qp *qp;
1029 int rc;
1030
1031 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1032 if (!qp)
1033 return NULL;
1034
1035 memset(qp, 0, sizeof(*qp));
1036 qp->rdev = rdev;
1037
1038 /* Initialize the shadow QP structure from the QP1 values */
1039 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1040
1041 qp->qplib_qp.pd = &pd->qplib_pd;
1042 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1043 qp->qplib_qp.type = IB_QPT_UD;
1044
1045 qp->qplib_qp.max_inline_data = 0;
1046 qp->qplib_qp.sig_type = true;
1047
1048 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1049 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1050 qp->qplib_qp.sq.max_sge = 2;
 1051 /* Q full delta can be 1 since it is internal QP */
 1052 qp->qplib_qp.sq.q_full_delta = 1;
 1053
1054 qp->qplib_qp.scq = qp1_qp->scq;
1055 qp->qplib_qp.rcq = qp1_qp->rcq;
1056
1057 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1058 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
 1059 /* Q full delta can be 1 since it is internal QP */
 1060 qp->qplib_qp.rq.q_full_delta = 1;
 1061
1062 qp->qplib_qp.mtu = qp1_qp->mtu;
1063
1064 qp->qplib_qp.sq_hdr_buf_size = 0;
1065 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1066 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1067
1068 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1069 if (rc)
1070 goto fail;
1071
1072 rdev->sqp_id = qp->qplib_qp.id;
1073
1074 spin_lock_init(&qp->sq_lock);
1075 INIT_LIST_HEAD(&qp->list);
1076 mutex_lock(&rdev->qp_lock);
1077 list_add_tail(&qp->list, &rdev->qp_list);
1078 atomic_inc(&rdev->qp_count);
1079 mutex_unlock(&rdev->qp_lock);
1080 return qp;
1081fail:
1082 kfree(qp);
1083 return NULL;
1084}
1085
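/*
 * QP creation. The requested capabilities are validated against the
 * device attributes and clamped to its limits; GSI QPs get header
 * buffers plus the shadow QP/AH above, while other QPs size their SQ
 * with BNXT_QPLIB_RESERVED_QP_WRS extra slots and keep one slot for the
 * phantom WQE before the HW QP is created.
 */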
1086struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1087 struct ib_qp_init_attr *qp_init_attr,
1088 struct ib_udata *udata)
1089{
1090 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1091 struct bnxt_re_dev *rdev = pd->rdev;
1092 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1093 struct bnxt_re_qp *qp;
1094 struct bnxt_re_cq *cq;
1095 int rc, entries;
1096
1097 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1098 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1099 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1100 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1101 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1102 return ERR_PTR(-EINVAL);
1103
1104 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1105 if (!qp)
1106 return ERR_PTR(-ENOMEM);
1107
1108 qp->rdev = rdev;
1109 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1110 qp->qplib_qp.pd = &pd->qplib_pd;
1111 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1112 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1113 if (qp->qplib_qp.type == IB_QPT_MAX) {
1114 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1115 qp->qplib_qp.type);
1116 rc = -EINVAL;
1117 goto fail;
1118 }
1119 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1120 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1121 IB_SIGNAL_ALL_WR) ? true : false);
1122
 1123 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1124 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1125 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1126
1127 if (qp_init_attr->send_cq) {
1128 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1129 ib_cq);
1130 if (!cq) {
1131 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1132 rc = -EINVAL;
1133 goto fail;
1134 }
1135 qp->qplib_qp.scq = &cq->qplib_cq;
1136 }
1137
1138 if (qp_init_attr->recv_cq) {
1139 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1140 ib_cq);
1141 if (!cq) {
1142 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1143 rc = -EINVAL;
1144 goto fail;
1145 }
1146 qp->qplib_qp.rcq = &cq->qplib_cq;
1147 }
1148
1149 if (qp_init_attr->srq) {
1150 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1151 rc = -ENOTSUPP;
1152 goto fail;
1153 } else {
1154 /* Allocate 1 more than what's provided so posting max doesn't
1155 * mean empty
1156 */
1157 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1158 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1159 dev_attr->max_qp_wqes + 1);
1160
 1161 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
 1162 qp_init_attr->cap.max_recv_wr;
 1163
 1164 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1165 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1166 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1167 }
1168
1169 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1170
1171 if (qp_init_attr->qp_type == IB_QPT_GSI) {
 1172 /* Allocate 1 more than what's provided */
1173 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1174 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1175 dev_attr->max_qp_wqes + 1);
1176 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1177 qp_init_attr->cap.max_send_wr;
 1178 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1179 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1180 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1181 qp->qplib_qp.sq.max_sge++;
1182 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1183 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1184
1185 qp->qplib_qp.rq_hdr_buf_size =
1186 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1187
1188 qp->qplib_qp.sq_hdr_buf_size =
1189 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1190 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1191 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1192 if (rc) {
1193 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1194 goto fail;
1195 }
1196 /* Create a shadow QP to handle the QP1 traffic */
1197 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1198 &qp->qplib_qp);
1199 if (!rdev->qp1_sqp) {
1200 rc = -EINVAL;
1201 dev_err(rdev_to_dev(rdev),
1202 "Failed to create Shadow QP for QP1");
1203 goto qp_destroy;
1204 }
1205 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1206 &qp->qplib_qp);
1207 if (!rdev->sqp_ah) {
1208 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1209 &rdev->qp1_sqp->qplib_qp);
1210 rc = -EINVAL;
1211 dev_err(rdev_to_dev(rdev),
1212 "Failed to create AH entry for ShadowQP");
1213 goto qp_destroy;
1214 }
1215
1216 } else {
 1217 /* Allocate 128 + 1 more than what's provided */
1218 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1219 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1220 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1221 dev_attr->max_qp_wqes +
1222 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1223 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1224
1225 /*
 1226 * Reserve one slot for the phantom WQE. The application can
 1227 * post one extra entry in this case; allow it to avoid an
 1228 * unexpected queue-full condition.
1229 */
1230
1231 qp->qplib_qp.sq.q_full_delta -= 1;
1232
 1233 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1234 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1235 if (udata) {
1236 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1237 if (rc)
1238 goto fail;
1239 } else {
1240 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1241 }
1242
1243 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1244 if (rc) {
1245 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1246 goto fail;
1247 }
1248 }
1249
1250 qp->ib_qp.qp_num = qp->qplib_qp.id;
1251 spin_lock_init(&qp->sq_lock);
1252
1253 if (udata) {
1254 struct bnxt_re_qp_resp resp;
1255
1256 resp.qpid = qp->ib_qp.qp_num;
1257 resp.rsvd = 0;
1258 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1259 if (rc) {
1260 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1261 goto qp_destroy;
1262 }
1263 }
1264 INIT_LIST_HEAD(&qp->list);
1265 mutex_lock(&rdev->qp_lock);
1266 list_add_tail(&qp->list, &rdev->qp_list);
1267 atomic_inc(&rdev->qp_count);
1268 mutex_unlock(&rdev->qp_lock);
1269
1270 return &qp->ib_qp;
1271qp_destroy:
1272 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1273fail:
1274 kfree(qp);
1275 return ERR_PTR(rc);
1276}
1277
1278static u8 __from_ib_qp_state(enum ib_qp_state state)
1279{
1280 switch (state) {
1281 case IB_QPS_RESET:
1282 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1283 case IB_QPS_INIT:
1284 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1285 case IB_QPS_RTR:
1286 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1287 case IB_QPS_RTS:
1288 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1289 case IB_QPS_SQD:
1290 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1291 case IB_QPS_SQE:
1292 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1293 case IB_QPS_ERR:
1294 default:
1295 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1296 }
1297}
1298
1299static enum ib_qp_state __to_ib_qp_state(u8 state)
1300{
1301 switch (state) {
1302 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1303 return IB_QPS_RESET;
1304 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1305 return IB_QPS_INIT;
1306 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1307 return IB_QPS_RTR;
1308 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1309 return IB_QPS_RTS;
1310 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1311 return IB_QPS_SQD;
1312 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1313 return IB_QPS_SQE;
1314 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1315 default:
1316 return IB_QPS_ERR;
1317 }
1318}
1319
1320static u32 __from_ib_mtu(enum ib_mtu mtu)
1321{
1322 switch (mtu) {
1323 case IB_MTU_256:
1324 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1325 case IB_MTU_512:
1326 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1327 case IB_MTU_1024:
1328 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1329 case IB_MTU_2048:
1330 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1331 case IB_MTU_4096:
1332 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1333 default:
1334 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1335 }
1336}
1337
1338static enum ib_mtu __to_ib_mtu(u32 mtu)
1339{
1340 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1341 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1342 return IB_MTU_256;
1343 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1344 return IB_MTU_512;
1345 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1346 return IB_MTU_1024;
1347 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1348 return IB_MTU_2048;
1349 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1350 return IB_MTU_4096;
1351 default:
1352 return IB_MTU_2048;
1353 }
1354}
1355
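/*
 * Mirror state, pkey-index and SQ-PSN changes made on the GSI QP onto
 * its shadow QP; the shadow QP's qkey is set to a fixed private value.
 */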
 1356static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1357 struct bnxt_re_qp *qp1_qp,
1358 int qp_attr_mask)
1359{
1360 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1361 int rc = 0;
1362
1363 if (qp_attr_mask & IB_QP_STATE) {
1364 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1365 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1366 }
1367 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1368 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1369 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1370 }
1371
1372 if (qp_attr_mask & IB_QP_QKEY) {
1373 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1374 /* Using a Random QKEY */
1375 qp->qplib_qp.qkey = 0x81818181;
1376 }
1377 if (qp_attr_mask & IB_QP_SQ_PSN) {
1378 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1379 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1380 }
1381
1382 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1383 if (rc)
1384 dev_err(rdev_to_dev(rdev),
1385 "Failed to modify Shadow QP for QP1");
1386 return rc;
1387}
1388
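/*
 * Translate the ib_qp_attr fields selected by qp_attr_mask into qplib
 * modify flags and issue the firmware MODIFY_QP. An RTR transition
 * without an explicit path MTU falls back to the netdev MTU, and
 * changes to the GSI QP are propagated to the shadow QP at the end.
 */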
1389int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1390 int qp_attr_mask, struct ib_udata *udata)
1391{
1392 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1393 struct bnxt_re_dev *rdev = qp->rdev;
1394 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1395 enum ib_qp_state curr_qp_state, new_qp_state;
1396 int rc, entries;
1397 int status;
1398 union ib_gid sgid;
1399 struct ib_gid_attr sgid_attr;
1400 u8 nw_type;
1401
1402 qp->qplib_qp.modify_flags = 0;
1403 if (qp_attr_mask & IB_QP_STATE) {
1404 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1405 new_qp_state = qp_attr->qp_state;
1406 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1407 ib_qp->qp_type, qp_attr_mask,
1408 IB_LINK_LAYER_ETHERNET)) {
1409 dev_err(rdev_to_dev(rdev),
1410 "Invalid attribute mask: %#x specified ",
1411 qp_attr_mask);
1412 dev_err(rdev_to_dev(rdev),
1413 "for qpn: %#x type: %#x",
1414 ib_qp->qp_num, ib_qp->qp_type);
1415 dev_err(rdev_to_dev(rdev),
1416 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1417 curr_qp_state, new_qp_state);
1418 return -EINVAL;
1419 }
1420 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1421 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1422 }
1423 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1424 qp->qplib_qp.modify_flags |=
1425 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1426 qp->qplib_qp.en_sqd_async_notify = true;
1427 }
1428 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1429 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1430 qp->qplib_qp.access =
1431 __from_ib_access_flags(qp_attr->qp_access_flags);
1432 /* LOCAL_WRITE access must be set to allow RC receive */
1433 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1434 }
1435 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1436 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1437 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1438 }
1439 if (qp_attr_mask & IB_QP_QKEY) {
1440 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1441 qp->qplib_qp.qkey = qp_attr->qkey;
1442 }
1443 if (qp_attr_mask & IB_QP_AV) {
 1444 const struct ib_global_route *grh =
1445 rdma_ah_read_grh(&qp_attr->ah_attr);
1446
 1447 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1448 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1449 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1450 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1451 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1452 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1453 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
 1454 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
 1455 sizeof(qp->qplib_qp.ah.dgid.data));
 1456 qp->qplib_qp.ah.flow_label = grh->flow_label;
 1457 /* If RoCE V2 is enabled, the stack will have two entries for
 1458 * each GID entry. Avoid this duplicate entry in HW by dividing
 1459 * the GID index by 2 for RoCE V2.
1460 */
 1461 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1462 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1463 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1464 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1465 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
 1466 ether_addr_copy(qp->qplib_qp.ah.dmac,
 1467 qp_attr->ah_attr.roce.dmac);
 1468
 1469 status = ib_get_cached_gid(&rdev->ibdev, 1,
 1470 grh->sgid_index,
 1471 &sgid, &sgid_attr);
1472 if (!status && sgid_attr.ndev) {
1473 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1474 ETH_ALEN);
1475 dev_put(sgid_attr.ndev);
1476 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1477 &sgid);
1478 switch (nw_type) {
1479 case RDMA_NETWORK_IPV4:
1480 qp->qplib_qp.nw_type =
1481 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1482 break;
1483 case RDMA_NETWORK_IPV6:
1484 qp->qplib_qp.nw_type =
1485 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1486 break;
1487 default:
1488 qp->qplib_qp.nw_type =
1489 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1490 break;
1491 }
1492 }
1493 }
1494
1495 if (qp_attr_mask & IB_QP_PATH_MTU) {
1496 qp->qplib_qp.modify_flags |=
1497 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1498 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1499 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1500 qp->qplib_qp.modify_flags |=
1501 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1502 qp->qplib_qp.path_mtu =
1503 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1504 }
1505
1506 if (qp_attr_mask & IB_QP_TIMEOUT) {
1507 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1508 qp->qplib_qp.timeout = qp_attr->timeout;
1509 }
1510 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1511 qp->qplib_qp.modify_flags |=
1512 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1513 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1514 }
1515 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1516 qp->qplib_qp.modify_flags |=
1517 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1518 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1519 }
1520 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1521 qp->qplib_qp.modify_flags |=
1522 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1523 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1524 }
1525 if (qp_attr_mask & IB_QP_RQ_PSN) {
1526 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1527 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1528 }
1529 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1530 qp->qplib_qp.modify_flags |=
1531 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1532 qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
1533 }
1534 if (qp_attr_mask & IB_QP_SQ_PSN) {
1535 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1536 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1537 }
1538 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1539 qp->qplib_qp.modify_flags |=
1540 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1541 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1542 }
1543 if (qp_attr_mask & IB_QP_CAP) {
1544 qp->qplib_qp.modify_flags |=
1545 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1546 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1547 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1548 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1549 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1550 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1551 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1552 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1553 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1554 (qp_attr->cap.max_inline_data >=
1555 dev_attr->max_inline_data)) {
1556 dev_err(rdev_to_dev(rdev),
1557 "Create QP failed - max exceeded");
1558 return -EINVAL;
1559 }
1560 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1561 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1562 dev_attr->max_qp_wqes + 1);
 1563 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1564 qp_attr->cap.max_send_wr;
1565 /*
 1566 * Reserve one slot for the phantom WQE. Some applications can
 1567 * post one extra entry in this case; allow it to avoid an
 1568 * unexpected queue-full condition.
1569 */
1570 qp->qplib_qp.sq.q_full_delta -= 1;
 1571 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1572 if (qp->qplib_qp.rq.max_wqe) {
1573 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1574 qp->qplib_qp.rq.max_wqe =
1575 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
 1576 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
 1577 qp_attr->cap.max_recv_wr;
 1578 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1579 } else {
1580 /* SRQ was used prior, just ignore the RQ caps */
1581 }
1582 }
1583 if (qp_attr_mask & IB_QP_DEST_QPN) {
1584 qp->qplib_qp.modify_flags |=
1585 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1586 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1587 }
1588 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1589 if (rc) {
1590 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1591 return rc;
1592 }
1593 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1594 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1595 return rc;
1596}
1597
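/*
 * Query the current QP attributes from the firmware; the reported
 * capabilities reflect the queue sizes stored in the driver's qplib
 * copy (which may have been rounded up) rather than the caller's
 * original request.
 */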
1598int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1599 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1600{
1601 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1602 struct bnxt_re_dev *rdev = qp->rdev;
1603 struct bnxt_qplib_qp qplib_qp;
1604 int rc;
1605
1606 memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
1607 qplib_qp.id = qp->qplib_qp.id;
1608 qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1609
1610 rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
1611 if (rc) {
1612 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1613 return rc;
1614 }
1615 qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
1616 qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
1617 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
1618 qp_attr->pkey_index = qplib_qp.pkey_index;
1619 qp_attr->qkey = qplib_qp.qkey;
 1620 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
 1621 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
1622 qplib_qp.ah.host_sgid_index,
1623 qplib_qp.ah.hop_limit,
1624 qplib_qp.ah.traffic_class);
1625 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
1626 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
 1627 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
 1628 qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
1629 qp_attr->timeout = qplib_qp.timeout;
1630 qp_attr->retry_cnt = qplib_qp.retry_cnt;
1631 qp_attr->rnr_retry = qplib_qp.rnr_retry;
1632 qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
1633 qp_attr->rq_psn = qplib_qp.rq.psn;
1634 qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
1635 qp_attr->sq_psn = qplib_qp.sq.psn;
1636 qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
1637 qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
1638 IB_SIGNAL_REQ_WR;
1639 qp_attr->dest_qp_num = qplib_qp.dest_qpn;
1640
1641 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1642 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1643 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1644 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1645 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1646 qp_init_attr->cap = qp_attr->cap;
1647
1648 return 0;
1649}
1650
 1651/* Routine for sending QP1 packets for RoCE V1 and V2
1652 */
1653static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1654 struct ib_send_wr *wr,
1655 struct bnxt_qplib_swqe *wqe,
1656 int payload_size)
1657{
1658 struct ib_device *ibdev = &qp->rdev->ibdev;
1659 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1660 ib_ah);
1661 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1662 struct bnxt_qplib_sge sge;
1663 union ib_gid sgid;
1664 u8 nw_type;
1665 u16 ether_type;
1666 struct ib_gid_attr sgid_attr;
1667 union ib_gid dgid;
1668 bool is_eth = false;
1669 bool is_vlan = false;
1670 bool is_grh = false;
1671 bool is_udp = false;
1672 u8 ip_version = 0;
1673 u16 vlan_id = 0xFFFF;
1674 void *buf;
1675 int i, rc = 0, size;
1676
1677 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1678
1679 rc = ib_get_cached_gid(ibdev, 1,
1680 qplib_ah->host_sgid_index, &sgid,
1681 &sgid_attr);
1682 if (rc) {
1683 dev_err(rdev_to_dev(qp->rdev),
1684 "Failed to query gid at index %d",
1685 qplib_ah->host_sgid_index);
1686 return rc;
1687 }
1688 if (sgid_attr.ndev) {
1689 if (is_vlan_dev(sgid_attr.ndev))
1690 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1691 dev_put(sgid_attr.ndev);
1692 }
1693 /* Get network header type for this GID */
1694 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1695 switch (nw_type) {
1696 case RDMA_NETWORK_IPV4:
1697 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1698 break;
1699 case RDMA_NETWORK_IPV6:
1700 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1701 break;
1702 default:
1703 nw_type = BNXT_RE_ROCE_V1_PACKET;
1704 break;
1705 }
1706 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1707 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1708 if (is_udp) {
1709 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1710 ip_version = 4;
1711 ether_type = ETH_P_IP;
1712 } else {
1713 ip_version = 6;
1714 ether_type = ETH_P_IPV6;
1715 }
1716 is_grh = false;
1717 } else {
1718 ether_type = ETH_P_IBOE;
1719 is_grh = true;
1720 }
1721
1722 is_eth = true;
1723 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1724
1725 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1726 ip_version, is_udp, 0, &qp->qp1_hdr);
1727
1728 /* ETH */
1729 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1730 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1731
1732 /* For vlan, check the sgid for vlan existence */
1733
1734 if (!is_vlan) {
1735 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1736 } else {
1737 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1738 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1739 }
1740
1741 if (is_grh || (ip_version == 6)) {
1742 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1743 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1744 sizeof(sgid));
1745 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1746 }
1747
1748 if (ip_version == 4) {
1749 qp->qp1_hdr.ip4.tos = 0;
1750 qp->qp1_hdr.ip4.id = 0;
1751 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1752 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1753
1754 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1755 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1756 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1757 }
1758
1759 if (is_udp) {
1760 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1761 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1762 qp->qp1_hdr.udp.csum = 0;
1763 }
1764
1765 /* BTH */
1766 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1767 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1768 qp->qp1_hdr.immediate_present = 1;
1769 } else {
1770 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1771 }
1772 if (wr->send_flags & IB_SEND_SOLICITED)
1773 qp->qp1_hdr.bth.solicited_event = 1;
1774 /* pad_count */
1775 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1776
1777 /* P_key for QP1 is for all members */
1778 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1779 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1780 qp->qp1_hdr.bth.ack_req = 0;
1781 qp->send_psn++;
1782 qp->send_psn &= BTH_PSN_MASK;
1783 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1784 /* DETH */
 1785	/* Use the privileged Q_Key for QP1 */
1786 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1787 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1788
1789 /* Pack the QP1 to the transmit buffer */
1790 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1791 if (buf) {
1792 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
1793 for (i = wqe->num_sge; i; i--) {
1794 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1795 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1796 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1797 }
1798
1799 /*
1800 * Max Header buf size for IPV6 RoCE V2 is 86,
 1801		 * which is the same as the QP1 SQ header buffer.
 1802		 * Header buf size for IPV4 RoCE V2 can be 66:
 1803		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
 1804		 * Subtract 20 bytes from the QP1 SQ header buf size.
1805 */
1806 if (is_udp && ip_version == 4)
1807 sge.size -= 20;
1808 /*
1809 * Max Header buf size for RoCE V1 is 78.
1810 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1811 * Subtract 8 bytes from QP1 SQ header buf size
1812 */
1813 if (!is_udp)
1814 sge.size -= 8;
1815
1816 /* Subtract 4 bytes for non vlan packets */
1817 if (!is_vlan)
1818 sge.size -= 4;
1819
1820 wqe->sg_list[0].addr = sge.addr;
1821 wqe->sg_list[0].lkey = sge.lkey;
1822 wqe->sg_list[0].size = sge.size;
1823 wqe->num_sge++;
1824
1825 } else {
1826 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1827 rc = -ENOMEM;
1828 }
1829 return rc;
1830}
1831
 1832/* The MAD layer only provides a recv SGE large enough for the ib_grh
 1833 * plus the MAD datagram; there is no room for the Ethernet headers,
 1834 * Ethertype, BTH, DETH, nor the RoCE iCRC. The Cu+ solution must
 1835 * provide a buffer for the entire receive packet (334 bytes) with no
 1836 * VLAN and then copy the GRH and the MAD datagram out to the provided SGE.
1837 */
1838static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1839 struct ib_recv_wr *wr,
1840 struct bnxt_qplib_swqe *wqe,
1841 int payload_size)
1842{
1843 struct bnxt_qplib_sge ref, sge;
1844 u32 rq_prod_index;
1845 struct bnxt_re_sqp_entries *sqp_entry;
1846
1847 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1848
1849 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1850 return -ENOMEM;
1851
1852 /* Create 1 SGE to receive the entire
1853 * ethernet packet
1854 */
1855 /* Save the reference from ULP */
1856 ref.addr = wqe->sg_list[0].addr;
1857 ref.lkey = wqe->sg_list[0].lkey;
1858 ref.size = wqe->sg_list[0].size;
1859
1860 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1861
1862 /* SGE 1 */
1863 wqe->sg_list[0].addr = sge.addr;
1864 wqe->sg_list[0].lkey = sge.lkey;
1865 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1866 sge.size -= wqe->sg_list[0].size;
1867
1868 sqp_entry->sge.addr = ref.addr;
1869 sqp_entry->sge.lkey = ref.lkey;
1870 sqp_entry->sge.size = ref.size;
1871 /* Store the wrid for reporting completion */
1872 sqp_entry->wrid = wqe->wr_id;
1873 /* change the wqe->wrid to table index */
1874 wqe->wr_id = rq_prod_index;
1875 return 0;
1876}
1877
1878static int is_ud_qp(struct bnxt_re_qp *qp)
1879{
1880 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1881}
1882
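/* Build a SEND/SEND_WITH_IMM/SEND_WITH_INV SWQE from an ib_send_wr.
 * For UD QPs the destination QPN, Q_Key and AH id are filled from the
 * ud_wr(); IB_SEND_* flags are translated into qplib SWQE flags.
 */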
1883static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1884 struct ib_send_wr *wr,
1885 struct bnxt_qplib_swqe *wqe)
1886{
1887 struct bnxt_re_ah *ah = NULL;
1888
1889 if (is_ud_qp(qp)) {
1890 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1891 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1892 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1893 wqe->send.avid = ah->qplib_ah.id;
1894 }
1895 switch (wr->opcode) {
1896 case IB_WR_SEND:
1897 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1898 break;
1899 case IB_WR_SEND_WITH_IMM:
1900 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1901 wqe->send.imm_data = wr->ex.imm_data;
1902 break;
1903 case IB_WR_SEND_WITH_INV:
1904 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1905 wqe->send.inv_key = wr->ex.invalidate_rkey;
1906 break;
1907 default:
1908 return -EINVAL;
1909 }
1910 if (wr->send_flags & IB_SEND_SIGNALED)
1911 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1912 if (wr->send_flags & IB_SEND_FENCE)
1913 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1914 if (wr->send_flags & IB_SEND_SOLICITED)
1915 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1916 if (wr->send_flags & IB_SEND_INLINE)
1917 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1918
1919 return 0;
1920}
1921
1922static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1923 struct bnxt_qplib_swqe *wqe)
1924{
1925 switch (wr->opcode) {
1926 case IB_WR_RDMA_WRITE:
1927 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1928 break;
1929 case IB_WR_RDMA_WRITE_WITH_IMM:
1930 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1931 wqe->rdma.imm_data = wr->ex.imm_data;
1932 break;
1933 case IB_WR_RDMA_READ:
1934 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1935 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1936 break;
1937 default:
1938 return -EINVAL;
1939 }
1940 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1941 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1942 if (wr->send_flags & IB_SEND_SIGNALED)
1943 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1944 if (wr->send_flags & IB_SEND_FENCE)
1945 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1946 if (wr->send_flags & IB_SEND_SOLICITED)
1947 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1948 if (wr->send_flags & IB_SEND_INLINE)
1949 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1950
1951 return 0;
1952}
1953
1954static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1955 struct bnxt_qplib_swqe *wqe)
1956{
1957 switch (wr->opcode) {
1958 case IB_WR_ATOMIC_CMP_AND_SWP:
1959 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1960 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1961 break;
1962 case IB_WR_ATOMIC_FETCH_AND_ADD:
1963 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1964 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1965 break;
1966 default:
1967 return -EINVAL;
1968 }
1969 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1970 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1971 if (wr->send_flags & IB_SEND_SIGNALED)
1972 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1973 if (wr->send_flags & IB_SEND_FENCE)
1974 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1975 if (wr->send_flags & IB_SEND_SOLICITED)
1976 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1977 return 0;
1978}
1979
1980static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1981 struct bnxt_qplib_swqe *wqe)
1982{
1983 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1984 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1985
1986 if (wr->send_flags & IB_SEND_SIGNALED)
1987 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1988 if (wr->send_flags & IB_SEND_FENCE)
1989 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1990 if (wr->send_flags & IB_SEND_SOLICITED)
1991 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1992
1993 return 0;
1994}
1995
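/* Translate an IB_WR_REG_MR request into a REG_MR SWQE: the fast-register
 * page list, access flags, key, length, page size and IOVA are taken from
 * the ib_reg_wr and the MR's qplib structures.
 */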
1996static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1997 struct bnxt_qplib_swqe *wqe)
1998{
1999 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2000 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2001 int access = wr->access;
2002
2003 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2004 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2005 wqe->frmr.page_list = mr->pages;
2006 wqe->frmr.page_list_len = mr->npages;
2007 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2008 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2009
2010 if (wr->wr.send_flags & IB_SEND_FENCE)
2011 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2012 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2013 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2014
2015 if (access & IB_ACCESS_LOCAL_WRITE)
2016 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2017 if (access & IB_ACCESS_REMOTE_READ)
2018 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2019 if (access & IB_ACCESS_REMOTE_WRITE)
2020 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2021 if (access & IB_ACCESS_REMOTE_ATOMIC)
2022 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2023 if (access & IB_ACCESS_MW_BIND)
2024 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2025
2026 wqe->frmr.l_key = wr->key;
2027 wqe->frmr.length = wr->mr->length;
2028 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2029 wqe->frmr.va = wr->mr->iova;
2030 return 0;
2031}
2032
2033static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2034 struct ib_send_wr *wr,
2035 struct bnxt_qplib_swqe *wqe)
2036{
2037 /* Copy the inline data to the data field */
2038 u8 *in_data;
2039 u32 i, sge_len;
2040 void *sge_addr;
2041
2042 in_data = wqe->inline_data;
2043 for (i = 0; i < wr->num_sge; i++) {
2044 sge_addr = (void *)(unsigned long)
2045 wr->sg_list[i].addr;
2046 sge_len = wr->sg_list[i].length;
2047
2048 if ((sge_len + wqe->inline_len) >
2049 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2050 dev_err(rdev_to_dev(rdev),
2051 "Inline data size requested > supported value");
2052 return -EINVAL;
2053 }
2054 sge_len = wr->sg_list[i].length;
2055
2056 memcpy(in_data, sge_addr, sge_len);
2057 in_data += wr->sg_list[i].length;
2058 wqe->inline_len += wr->sg_list[i].length;
2059 }
2060 return wqe->inline_len;
2061}
2062
2063static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2064 struct ib_send_wr *wr,
2065 struct bnxt_qplib_swqe *wqe)
2066{
2067 int payload_sz = 0;
2068
2069 if (wr->send_flags & IB_SEND_INLINE)
2070 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2071 else
2072 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2073 wqe->num_sge);
2074
2075 return payload_sz;
2076}
2077
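/* Post a chain of work requests on the driver's internal shadow GSI QP.
 * Every WR is posted as a plain SEND; on failure the loop stops without
 * reporting a bad_wr, since the caller is the driver itself.
 */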
2078static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2079 struct bnxt_re_qp *qp,
2080 struct ib_send_wr *wr)
2081{
2082 struct bnxt_qplib_swqe wqe;
2083 int rc = 0, payload_sz = 0;
2084 unsigned long flags;
2085
2086 spin_lock_irqsave(&qp->sq_lock, flags);
2087 memset(&wqe, 0, sizeof(wqe));
2088 while (wr) {
2089 /* House keeping */
2090 memset(&wqe, 0, sizeof(wqe));
2091
2092 /* Common */
2093 wqe.num_sge = wr->num_sge;
2094 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2095 dev_err(rdev_to_dev(rdev),
2096 "Limit exceeded for Send SGEs");
2097 rc = -EINVAL;
2098 goto bad;
2099 }
2100
2101 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2102 if (payload_sz < 0) {
2103 rc = -EINVAL;
2104 goto bad;
2105 }
2106 wqe.wr_id = wr->wr_id;
2107
2108 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2109
2110 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2111 if (!rc)
2112 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2113bad:
2114 if (rc) {
2115 dev_err(rdev_to_dev(rdev),
2116 "Post send failed opcode = %#x rc = %d",
2117 wr->opcode, rc);
2118 break;
2119 }
2120 wr = wr->next;
2121 }
2122 bnxt_qplib_post_send_db(&qp->qplib_qp);
2123 spin_unlock_irqrestore(&qp->sq_lock, flags);
2124 return rc;
2125}
2126
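/* Verbs post_send handler. For the GSI QP, SEND/SEND_WITH_IMM WRs first
 * get the full QP1 header built by bnxt_re_build_qp1_send_v2() before the
 * SWQE is posted; the SQ doorbell is rung once after the whole chain.
 */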
2127int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2128 struct ib_send_wr **bad_wr)
2129{
2130 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2131 struct bnxt_qplib_swqe wqe;
2132 int rc = 0, payload_sz = 0;
2133 unsigned long flags;
2134
2135 spin_lock_irqsave(&qp->sq_lock, flags);
2136 while (wr) {
2137 /* House keeping */
2138 memset(&wqe, 0, sizeof(wqe));
2139
2140 /* Common */
2141 wqe.num_sge = wr->num_sge;
2142 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2143 dev_err(rdev_to_dev(qp->rdev),
2144 "Limit exceeded for Send SGEs");
2145 rc = -EINVAL;
2146 goto bad;
2147 }
2148
2149 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2150 if (payload_sz < 0) {
2151 rc = -EINVAL;
2152 goto bad;
2153 }
2154 wqe.wr_id = wr->wr_id;
2155
2156 switch (wr->opcode) {
2157 case IB_WR_SEND:
2158 case IB_WR_SEND_WITH_IMM:
2159 if (ib_qp->qp_type == IB_QPT_GSI) {
2160 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2161 payload_sz);
2162 if (rc)
2163 goto bad;
2164 wqe.rawqp1.lflags |=
2165 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2166 }
 2167			/* send_flags is a bitmask, so test the IB_SEND_IP_CSUM
 2168			 * bit rather than switching on the whole mask; otherwise
 2169			 * the checksum offload flag is lost when other send
 2170			 * flags (e.g. IB_SEND_SIGNALED) are also set.
 2171			 */
 2172			if (wr->send_flags & IB_SEND_IP_CSUM)
 2173				wqe.rawqp1.lflags |=
 2174					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2175 /* Fall thru to build the wqe */
2176 case IB_WR_SEND_WITH_INV:
2177 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2178 break;
2179 case IB_WR_RDMA_WRITE:
2180 case IB_WR_RDMA_WRITE_WITH_IMM:
2181 case IB_WR_RDMA_READ:
2182 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2183 break;
2184 case IB_WR_ATOMIC_CMP_AND_SWP:
2185 case IB_WR_ATOMIC_FETCH_AND_ADD:
2186 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2187 break;
2188 case IB_WR_RDMA_READ_WITH_INV:
2189 dev_err(rdev_to_dev(qp->rdev),
2190 "RDMA Read with Invalidate is not supported");
2191 rc = -EINVAL;
2192 goto bad;
2193 case IB_WR_LOCAL_INV:
2194 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2195 break;
2196 case IB_WR_REG_MR:
2197 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2198 break;
2199 default:
2200 /* Unsupported WRs */
2201 dev_err(rdev_to_dev(qp->rdev),
2202 "WR (%#x) is not supported", wr->opcode);
2203 rc = -EINVAL;
2204 goto bad;
2205 }
2206 if (!rc)
2207 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2208bad:
2209 if (rc) {
2210 dev_err(rdev_to_dev(qp->rdev),
2211 "post_send failed op:%#x qps = %#x rc = %d\n",
2212 wr->opcode, qp->qplib_qp.state, rc);
2213 *bad_wr = wr;
2214 break;
2215 }
2216 wr = wr->next;
2217 }
2218 bnxt_qplib_post_send_db(&qp->qplib_qp);
2219 spin_unlock_irqrestore(&qp->sq_lock, flags);
2220
2221 return rc;
2222}
2223
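/* Post receive buffers on the driver's internal shadow GSI QP; used only
 * by the raw QP1 relay path in bnxt_re_process_raw_qp_pkt_rx().
 */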
2224static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2225 struct bnxt_re_qp *qp,
2226 struct ib_recv_wr *wr)
2227{
2228 struct bnxt_qplib_swqe wqe;
2229 int rc = 0, payload_sz = 0;
2230
2231 memset(&wqe, 0, sizeof(wqe));
2232 while (wr) {
2233 /* House keeping */
2234 memset(&wqe, 0, sizeof(wqe));
2235
2236 /* Common */
2237 wqe.num_sge = wr->num_sge;
2238 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2239 dev_err(rdev_to_dev(rdev),
2240 "Limit exceeded for Receive SGEs");
2241 rc = -EINVAL;
2242 break;
2243 }
2244 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2245 wr->num_sge);
2246 wqe.wr_id = wr->wr_id;
2247 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2248
2249 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2250 if (rc)
2251 break;
2252
2253 wr = wr->next;
2254 }
2255 if (!rc)
2256 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2257 return rc;
2258}
2259
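/* Verbs post_recv handler. For the GSI QP the posted buffer is swapped
 * for a full-sized QP1 receive buffer by bnxt_re_build_qp1_shadow_qp_recv()
 * and the original SGE is remembered in the sqp_tbl for completion time.
 */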
2260int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2261 struct ib_recv_wr **bad_wr)
2262{
2263 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2264 struct bnxt_qplib_swqe wqe;
2265 int rc = 0, payload_sz = 0;
2266
2267 while (wr) {
2268 /* House keeping */
2269 memset(&wqe, 0, sizeof(wqe));
2270
2271 /* Common */
2272 wqe.num_sge = wr->num_sge;
2273 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2274 dev_err(rdev_to_dev(qp->rdev),
2275 "Limit exceeded for Receive SGEs");
2276 rc = -EINVAL;
2277 *bad_wr = wr;
2278 break;
2279 }
2280
2281 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2282 wr->num_sge);
2283 wqe.wr_id = wr->wr_id;
2284 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2285
2286 if (ib_qp->qp_type == IB_QPT_GSI)
2287 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2288 payload_sz);
2289 if (!rc)
2290 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2291 if (rc) {
2292 *bad_wr = wr;
2293 break;
2294 }
2295 wr = wr->next;
2296 }
2297 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2298 return rc;
2299}
2300
2301/* Completion Queues */
2302int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2303{
2304 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2305 struct bnxt_re_dev *rdev = cq->rdev;
2306 int rc;
2307
2308 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2309 if (rc) {
2310 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2311 return rc;
2312 }
Doug Ledford374cb862017-04-25 14:00:59 -04002313 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002314 ib_umem_release(cq->umem);
2315
2316 if (cq) {
2317 kfree(cq->cql);
2318 kfree(cq);
2319 }
2320 atomic_dec(&rdev->cq_count);
2321 rdev->nq.budget--;
2322 return 0;
2323}
2324
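/* Create a CQ sized to the next power of two, capped at max_cq_wqes + 1.
 * For user CQs the CQE ring supplied in the ABI request is pinned with
 * ib_umem_get(); kernel CQs get a local CQE cache (cql) that is used by
 * bnxt_re_poll_cq().
 */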
2325struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2326 const struct ib_cq_init_attr *attr,
2327 struct ib_ucontext *context,
2328 struct ib_udata *udata)
2329{
2330 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2331 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2332 struct bnxt_re_cq *cq = NULL;
2333 int rc, entries;
2334 int cqe = attr->cqe;
2335
2336 /* Validate CQ fields */
2337 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2338 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2339 return ERR_PTR(-EINVAL);
2340 }
2341 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2342 if (!cq)
2343 return ERR_PTR(-ENOMEM);
2344
2345 cq->rdev = rdev;
2346 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2347
2348 entries = roundup_pow_of_two(cqe + 1);
2349 if (entries > dev_attr->max_cq_wqes + 1)
2350 entries = dev_attr->max_cq_wqes + 1;
2351
2352 if (context) {
2353 struct bnxt_re_cq_req req;
2354 struct bnxt_re_ucontext *uctx = container_of
2355 (context,
2356 struct bnxt_re_ucontext,
2357 ib_uctx);
2358 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2359 rc = -EFAULT;
2360 goto fail;
2361 }
2362
2363 cq->umem = ib_umem_get(context, req.cq_va,
2364 entries * sizeof(struct cq_base),
2365 IB_ACCESS_LOCAL_WRITE, 1);
2366 if (IS_ERR(cq->umem)) {
2367 rc = PTR_ERR(cq->umem);
2368 goto fail;
2369 }
2370 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2371 cq->qplib_cq.nmap = cq->umem->nmap;
2372 cq->qplib_cq.dpi = uctx->dpi;
2373 } else {
2374 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2375 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2376 GFP_KERNEL);
2377 if (!cq->cql) {
2378 rc = -ENOMEM;
2379 goto fail;
2380 }
2381
2382 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2383 cq->qplib_cq.sghead = NULL;
2384 cq->qplib_cq.nmap = 0;
2385 }
2386 cq->qplib_cq.max_wqe = entries;
2387 cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
2388
2389 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2390 if (rc) {
2391 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2392 goto fail;
2393 }
2394
2395 cq->ib_cq.cqe = entries;
2396 cq->cq_period = cq->qplib_cq.period;
2397 rdev->nq.budget++;
2398
2399 atomic_inc(&rdev->cq_count);
2400
2401 if (context) {
2402 struct bnxt_re_cq_resp resp;
2403
2404 resp.cqid = cq->qplib_cq.id;
2405 resp.tail = cq->qplib_cq.hwq.cons;
2406 resp.phase = cq->qplib_cq.period;
2407 resp.rsvd = 0;
2408 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2409 if (rc) {
2410 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2411 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2412 goto c2fail;
2413 }
2414 }
2415
2416 return &cq->ib_cq;
2417
2418c2fail:
2419 if (context)
2420 ib_umem_release(cq->umem);
2421fail:
2422 kfree(cq->cql);
2423 kfree(cq);
2424 return ERR_PTR(rc);
2425}
2426
2427static u8 __req_to_ib_wc_status(u8 qstatus)
2428{
2429 switch (qstatus) {
2430 case CQ_REQ_STATUS_OK:
2431 return IB_WC_SUCCESS;
2432 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2433 return IB_WC_BAD_RESP_ERR;
2434 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2435 return IB_WC_LOC_LEN_ERR;
2436 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2437 return IB_WC_LOC_QP_OP_ERR;
2438 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2439 return IB_WC_LOC_PROT_ERR;
2440 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2441 return IB_WC_GENERAL_ERR;
2442 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2443 return IB_WC_REM_INV_REQ_ERR;
2444 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2445 return IB_WC_REM_ACCESS_ERR;
2446 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2447 return IB_WC_REM_OP_ERR;
2448 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2449 return IB_WC_RNR_RETRY_EXC_ERR;
2450 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2451 return IB_WC_RETRY_EXC_ERR;
2452 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2453 return IB_WC_WR_FLUSH_ERR;
2454 default:
2455 return IB_WC_GENERAL_ERR;
2456 }
2457 return 0;
2458}
2459
2460static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2461{
2462 switch (qstatus) {
2463 case CQ_RES_RAWETH_QP1_STATUS_OK:
2464 return IB_WC_SUCCESS;
2465 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2466 return IB_WC_LOC_ACCESS_ERR;
2467 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2468 return IB_WC_LOC_LEN_ERR;
2469 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2470 return IB_WC_LOC_PROT_ERR;
2471 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2472 return IB_WC_LOC_QP_OP_ERR;
2473 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2474 return IB_WC_GENERAL_ERR;
2475 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2476 return IB_WC_WR_FLUSH_ERR;
2477 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2478 return IB_WC_WR_FLUSH_ERR;
2479 default:
2480 return IB_WC_GENERAL_ERR;
2481 }
2482}
2483
2484static u8 __rc_to_ib_wc_status(u8 qstatus)
2485{
2486 switch (qstatus) {
2487 case CQ_RES_RC_STATUS_OK:
2488 return IB_WC_SUCCESS;
2489 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2490 return IB_WC_LOC_ACCESS_ERR;
2491 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2492 return IB_WC_LOC_LEN_ERR;
2493 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2494 return IB_WC_LOC_PROT_ERR;
2495 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2496 return IB_WC_LOC_QP_OP_ERR;
2497 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2498 return IB_WC_GENERAL_ERR;
2499 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2500 return IB_WC_REM_INV_REQ_ERR;
2501 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2502 return IB_WC_WR_FLUSH_ERR;
2503 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2504 return IB_WC_WR_FLUSH_ERR;
2505 default:
2506 return IB_WC_GENERAL_ERR;
2507 }
2508}
2509
2510static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2511{
2512 switch (cqe->type) {
2513 case BNXT_QPLIB_SWQE_TYPE_SEND:
2514 wc->opcode = IB_WC_SEND;
2515 break;
2516 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2517 wc->opcode = IB_WC_SEND;
2518 wc->wc_flags |= IB_WC_WITH_IMM;
2519 break;
2520 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2521 wc->opcode = IB_WC_SEND;
2522 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2523 break;
2524 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2525 wc->opcode = IB_WC_RDMA_WRITE;
2526 break;
2527 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2528 wc->opcode = IB_WC_RDMA_WRITE;
2529 wc->wc_flags |= IB_WC_WITH_IMM;
2530 break;
2531 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2532 wc->opcode = IB_WC_RDMA_READ;
2533 break;
2534 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2535 wc->opcode = IB_WC_COMP_SWAP;
2536 break;
2537 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2538 wc->opcode = IB_WC_FETCH_ADD;
2539 break;
2540 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2541 wc->opcode = IB_WC_LOCAL_INV;
2542 break;
2543 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2544 wc->opcode = IB_WC_REG_MR;
2545 break;
2546 default:
2547 wc->opcode = IB_WC_SEND;
2548 break;
2549 }
2550
2551 wc->status = __req_to_ib_wc_status(cqe->status);
2552}
2553
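/* Classify a raw QP1 completion as RoCE v1, RoCE v2/IPv4 or RoCE v2/IPv6
 * from the raweth_qp1 flags; returns -1 if the itype is not RoCE.
 */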
2554static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2555 u16 raweth_qp1_flags2)
2556{
2557 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2558
 2559	/* Bits 9-6 of raweth_qp1_flags indicate the itype */
2560 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2561 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2562 return -1;
2563
2564 if (raweth_qp1_flags2 &
2565 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2566 raweth_qp1_flags2 &
2567 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2568 is_udp = true;
 2569		/* Bit 8 of raweth_qp1_flags2 indicates the ip_type: 0 - v4, 1 - v6 */
2570 (raweth_qp1_flags2 &
2571 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2572 (is_ipv6 = true) : (is_ipv4 = true);
2573 return ((is_ipv6) ?
2574 BNXT_RE_ROCEV2_IPV6_PACKET :
2575 BNXT_RE_ROCEV2_IPV4_PACKET);
2576 } else {
2577 return BNXT_RE_ROCE_V1_PACKET;
2578 }
2579}
2580
2581static int bnxt_re_to_ib_nw_type(int nw_type)
2582{
2583 u8 nw_hdr_type = 0xFF;
2584
2585 switch (nw_type) {
2586 case BNXT_RE_ROCE_V1_PACKET:
2587 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2588 break;
2589 case BNXT_RE_ROCEV2_IPV4_PACKET:
2590 nw_hdr_type = RDMA_NETWORK_IPV4;
2591 break;
2592 case BNXT_RE_ROCEV2_IPV6_PACKET:
2593 nw_hdr_type = RDMA_NETWORK_IPV6;
2594 break;
2595 }
2596 return nw_hdr_type;
2597}
2598
2599static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2600 void *rq_hdr_buf)
2601{
2602 u8 *tmp_buf = NULL;
2603 struct ethhdr *eth_hdr;
2604 u16 eth_type;
2605 bool rc = false;
2606
2607 tmp_buf = (u8 *)rq_hdr_buf;
2608 /*
 2609	 * If the dest MAC is not the same as the I/F MAC, this could be
 2610	 * a loopback or a multicast address; check whether it is a
 2611	 * loopback packet.
2612 */
2613 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2614 tmp_buf += 4;
2615 /* Check the ether type */
2616 eth_hdr = (struct ethhdr *)tmp_buf;
2617 eth_type = ntohs(eth_hdr->h_proto);
2618 switch (eth_type) {
2619 case ETH_P_IBOE:
2620 rc = true;
2621 break;
2622 case ETH_P_IP:
2623 case ETH_P_IPV6: {
2624 u32 len;
2625 struct udphdr *udp_hdr;
2626
2627 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2628 sizeof(struct ipv6hdr));
2629 tmp_buf += sizeof(struct ethhdr) + len;
2630 udp_hdr = (struct udphdr *)tmp_buf;
2631 if (ntohs(udp_hdr->dest) ==
2632 ROCE_V2_UDP_DPORT)
2633 rc = true;
2634 break;
2635 }
2636 default:
2637 break;
2638 }
2639 }
2640
2641 return rc;
2642}
2643
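/* Relay a packet received on the hardware QP1 to the shadow GSI QP: the
 * original completion is stashed in the sqp_tbl, a receive buffer (shadow
 * header buffer plus the ULP's saved SGE) is posted on the shadow QP, and
 * the payload (minus the Ethernet/loopback prefix) is looped back to it as
 * a UD SEND so that the ULP ultimately sees only the GRH and the MAD.
 */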
2644static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2645 struct bnxt_qplib_cqe *cqe)
2646{
2647 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2648 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2649 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2650 struct ib_send_wr *swr;
2651 struct ib_ud_wr udwr;
2652 struct ib_recv_wr rwr;
2653 int pkt_type = 0;
2654 u32 tbl_idx;
2655 void *rq_hdr_buf;
2656 dma_addr_t rq_hdr_buf_map;
2657 dma_addr_t shrq_hdr_buf_map;
2658 u32 offset = 0;
2659 u32 skip_bytes = 0;
2660 struct ib_sge s_sge[2];
2661 struct ib_sge r_sge[2];
2662 int rc;
2663
2664 memset(&udwr, 0, sizeof(udwr));
2665 memset(&rwr, 0, sizeof(rwr));
2666 memset(&s_sge, 0, sizeof(s_sge));
2667 memset(&r_sge, 0, sizeof(r_sge));
2668
2669 swr = &udwr.wr;
2670 tbl_idx = cqe->wr_id;
2671
2672 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2673 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2674 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2675 tbl_idx);
2676
2677 /* Shadow QP header buffer */
2678 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2679 tbl_idx);
2680 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2681
2682 /* Store this cqe */
2683 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2684 sqp_entry->qp1_qp = qp1_qp;
2685
2686 /* Find packet type from the cqe */
2687
2688 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2689 cqe->raweth_qp1_flags2);
2690 if (pkt_type < 0) {
2691 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2692 return -EINVAL;
2693 }
2694
2695 /* Adjust the offset for the user buffer and post in the rq */
2696
2697 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2698 offset = 20;
2699
2700 /*
2701 * QP1 loopback packet has 4 bytes of internal header before
2702 * ether header. Skip these four bytes.
2703 */
2704 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2705 skip_bytes = 4;
2706
 2707	/* First send SGE. Skip the ether header */
2708 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2709 + skip_bytes;
2710 s_sge[0].lkey = 0xFFFFFFFF;
2711 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2712 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2713
2714 /* Second Send SGE */
2715 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2716 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2717 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2718 s_sge[1].addr += 8;
2719 s_sge[1].lkey = 0xFFFFFFFF;
2720 s_sge[1].length = 256;
2721
2722 /* First recv SGE */
2723
2724 r_sge[0].addr = shrq_hdr_buf_map;
2725 r_sge[0].lkey = 0xFFFFFFFF;
2726 r_sge[0].length = 40;
2727
2728 r_sge[1].addr = sqp_entry->sge.addr + offset;
2729 r_sge[1].lkey = sqp_entry->sge.lkey;
2730 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2731
2732 /* Create receive work request */
2733 rwr.num_sge = 2;
2734 rwr.sg_list = r_sge;
2735 rwr.wr_id = tbl_idx;
2736 rwr.next = NULL;
2737
2738 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2739 if (rc) {
2740 dev_err(rdev_to_dev(rdev),
2741 "Failed to post Rx buffers to shadow QP");
2742 return -ENOMEM;
2743 }
2744
2745 swr->num_sge = 2;
2746 swr->sg_list = s_sge;
2747 swr->wr_id = tbl_idx;
2748 swr->opcode = IB_WR_SEND;
2749 swr->next = NULL;
2750
2751 udwr.ah = &rdev->sqp_ah->ib_ah;
2752 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2753 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2754
2755 /* post data received in the send queue */
2756 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2757
2758 return 0;
2759}
2760
2761static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2762 struct bnxt_qplib_cqe *cqe)
2763{
2764 wc->opcode = IB_WC_RECV;
2765 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2766 wc->wc_flags |= IB_WC_GRH;
2767}
2768
2769static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2770 struct bnxt_qplib_cqe *cqe)
2771{
2772 wc->opcode = IB_WC_RECV;
2773 wc->status = __rc_to_ib_wc_status(cqe->status);
2774
2775 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2776 wc->wc_flags |= IB_WC_WITH_IMM;
2777 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2778 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2779 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2780 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2781 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2782}
2783
2784static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2785 struct ib_wc *wc,
2786 struct bnxt_qplib_cqe *cqe)
2787{
2788 u32 tbl_idx;
2789 struct bnxt_re_dev *rdev = qp->rdev;
2790 struct bnxt_re_qp *qp1_qp = NULL;
2791 struct bnxt_qplib_cqe *orig_cqe = NULL;
2792 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2793 int nw_type;
2794
2795 tbl_idx = cqe->wr_id;
2796
2797 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2798 qp1_qp = sqp_entry->qp1_qp;
2799 orig_cqe = &sqp_entry->cqe;
2800
2801 wc->wr_id = sqp_entry->wrid;
2802 wc->byte_len = orig_cqe->length;
2803 wc->qp = &qp1_qp->ib_qp;
2804
2805 wc->ex.imm_data = orig_cqe->immdata;
2806 wc->src_qp = orig_cqe->src_qp;
2807 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2808 wc->port_num = 1;
2809 wc->vendor_err = orig_cqe->status;
2810
2811 wc->opcode = IB_WC_RECV;
2812 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2813 wc->wc_flags |= IB_WC_GRH;
2814
2815 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2816 orig_cqe->raweth_qp1_flags2);
2817 if (nw_type >= 0) {
2818 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2819 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2820 }
2821}
2822
2823static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2824 struct bnxt_qplib_cqe *cqe)
2825{
2826 wc->opcode = IB_WC_RECV;
2827 wc->status = __rc_to_ib_wc_status(cqe->status);
2828
2829 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2830 wc->wc_flags |= IB_WC_WITH_IMM;
2831 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2832 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2833 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2834 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2835 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2836}
2837
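/* Post a phantom WQE (a fence memory-window bind) on the SQ under the SQ
 * lock; called from bnxt_re_poll_cq() when the qplib layer has flagged
 * sq->send_phantom.
 */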
Eddie Wai9152e0b2017-06-14 03:26:23 -07002838static int send_phantom_wqe(struct bnxt_re_qp *qp)
2839{
2840 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2841 unsigned long flags;
2842 int rc = 0;
2843
2844 spin_lock_irqsave(&qp->sq_lock, flags);
2845
2846 rc = bnxt_re_bind_fence_mw(lib_qp);
2847 if (!rc) {
2848 lib_qp->sq.phantom_wqe_cnt++;
2849 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2850 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2851 lib_qp->id, lib_qp->sq.hwq.prod,
2852 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2853 lib_qp->sq.phantom_wqe_cnt);
2854 }
2855
2856 spin_unlock_irqrestore(&qp->sq_lock, flags);
2857 return rc;
2858}
2859
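/* Poll up to num_entries completions. Raw QP1 receives are relayed to the
 * shadow QP, send completions of the shadow QP are dropped, and UD receives
 * on the shadow QP are reported to the consumer using the completion stored
 * for the original QP1 packet.
 */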
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002860int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2861{
2862 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2863 struct bnxt_re_qp *qp;
2864 struct bnxt_qplib_cqe *cqe;
2865 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002866 struct bnxt_qplib_q *sq;
2867 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002868 u32 tbl_idx;
2869 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2870 unsigned long flags;
2871
2872 spin_lock_irqsave(&cq->cq_lock, flags);
2873 budget = min_t(u32, num_entries, cq->max_cql);
2874 if (!cq->cql) {
2875 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2876 goto exit;
2877 }
2878 cqe = &cq->cql[0];
2879 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002880 lib_qp = NULL;
2881 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2882 if (lib_qp) {
2883 sq = &lib_qp->sq;
2884 if (sq->send_phantom) {
2885 qp = container_of(lib_qp,
2886 struct bnxt_re_qp, qplib_qp);
2887 if (send_phantom_wqe(qp) == -ENOMEM)
2888 dev_err(rdev_to_dev(cq->rdev),
2889 "Phantom failed! Scheduled to send again\n");
2890 else
2891 sq->send_phantom = false;
2892 }
2893 }
2894
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002895 if (!ncqe)
2896 break;
2897
2898 for (i = 0; i < ncqe; i++, cqe++) {
2899 /* Transcribe each qplib_wqe back to ib_wc */
2900 memset(wc, 0, sizeof(*wc));
2901
2902 wc->wr_id = cqe->wr_id;
2903 wc->byte_len = cqe->length;
2904 qp = container_of
2905 ((struct bnxt_qplib_qp *)
2906 (unsigned long)(cqe->qp_handle),
2907 struct bnxt_re_qp, qplib_qp);
2908 if (!qp) {
2909 dev_err(rdev_to_dev(cq->rdev),
2910 "POLL CQ : bad QP handle");
2911 continue;
2912 }
2913 wc->qp = &qp->ib_qp;
2914 wc->ex.imm_data = cqe->immdata;
2915 wc->src_qp = cqe->src_qp;
2916 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2917 wc->port_num = 1;
2918 wc->vendor_err = cqe->status;
2919
2920 switch (cqe->opcode) {
2921 case CQ_BASE_CQE_TYPE_REQ:
2922 if (qp->qplib_qp.id ==
2923 qp->rdev->qp1_sqp->qplib_qp.id) {
2924 /* Handle this completion with
2925 * the stored completion
2926 */
2927 memset(wc, 0, sizeof(*wc));
2928 continue;
2929 }
2930 bnxt_re_process_req_wc(wc, cqe);
2931 break;
2932 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2933 if (!cqe->status) {
2934 int rc = 0;
2935
2936 rc = bnxt_re_process_raw_qp_pkt_rx
2937 (qp, cqe);
2938 if (!rc) {
2939 memset(wc, 0, sizeof(*wc));
2940 continue;
2941 }
2942 cqe->status = -1;
2943 }
2944 /* Errors need not be looped back.
2945 * But change the wr_id to the one
2946 * stored in the table
2947 */
2948 tbl_idx = cqe->wr_id;
2949 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2950 wc->wr_id = sqp_entry->wrid;
2951 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2952 break;
2953 case CQ_BASE_CQE_TYPE_RES_RC:
2954 bnxt_re_process_res_rc_wc(wc, cqe);
2955 break;
2956 case CQ_BASE_CQE_TYPE_RES_UD:
2957 if (qp->qplib_qp.id ==
2958 qp->rdev->qp1_sqp->qplib_qp.id) {
2959 /* Handle this completion with
2960 * the stored completion
2961 */
2962 if (cqe->status) {
2963 continue;
2964 } else {
2965 bnxt_re_process_res_shadow_qp_wc
2966 (qp, wc, cqe);
2967 break;
2968 }
2969 }
2970 bnxt_re_process_res_ud_wc(wc, cqe);
2971 break;
2972 default:
2973 dev_err(rdev_to_dev(cq->rdev),
2974 "POLL CQ : type 0x%x not handled",
2975 cqe->opcode);
2976 continue;
2977 }
2978 wc++;
2979 budget--;
2980 }
2981 }
2982exit:
2983 spin_unlock_irqrestore(&cq->cq_lock, flags);
2984 return num_entries - budget;
2985}
2986
2987int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2988 enum ib_cq_notify_flags ib_cqn_flags)
2989{
2990 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2991 int type = 0;
2992
2993 /* Trigger on the very next completion */
2994 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
2995 type = DBR_DBR_TYPE_CQ_ARMALL;
2996 /* Trigger on the next solicited completion */
2997 else if (ib_cqn_flags & IB_CQ_SOLICITED)
2998 type = DBR_DBR_TYPE_CQ_ARMSE;
2999
3000 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3001
3002 return 0;
3003}
3004
3005/* Memory Regions */
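/* Register a DMA MR: address 0 is registered with PBL_LVL_MAX and an
 * "infinite" length so the key covers the whole DMA address space; the
 * rkey is exposed only when remote access flags were requested.
 */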
3006struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3007{
3008 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3009 struct bnxt_re_dev *rdev = pd->rdev;
3010 struct bnxt_re_mr *mr;
3011 u64 pbl = 0;
3012 int rc;
3013
3014 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3015 if (!mr)
3016 return ERR_PTR(-ENOMEM);
3017
3018 mr->rdev = rdev;
3019 mr->qplib_mr.pd = &pd->qplib_pd;
3020 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3021 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3022
3023 /* Allocate and register 0 as the address */
3024 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3025 if (rc)
3026 goto fail;
3027
3028 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
 3029	mr->qplib_mr.total_size = -1; /* Infinite length */
3030 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3031 if (rc)
3032 goto fail_mr;
3033
3034 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3035 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3036 IB_ACCESS_REMOTE_ATOMIC))
3037 mr->ib_mr.rkey = mr->ib_mr.lkey;
3038 atomic_inc(&rdev->mr_count);
3039
3040 return &mr->ib_mr;
3041
3042fail_mr:
3043 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3044fail:
3045 kfree(mr);
3046 return ERR_PTR(rc);
3047}
3048
3049int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3050{
3051 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3052 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003053 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003054
Selvin Xavier1c980b02017-05-22 03:15:34 -07003055 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3056 if (rc) {
3057 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3058 return rc;
3059 }
3060
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003061 if (mr->npages && mr->pages) {
3062 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3063 &mr->qplib_frpl);
3064 kfree(mr->pages);
3065 mr->npages = 0;
3066 mr->pages = NULL;
3067 }
Doug Ledford374cb862017-04-25 14:00:59 -04003068 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003069 ib_umem_release(mr->ib_umem);
3070
3071 kfree(mr);
3072 atomic_dec(&rdev->mr_count);
3073 return rc;
3074}
3075
3076static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3077{
3078 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3079
3080 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3081 return -ENOMEM;
3082
3083 mr->pages[mr->npages++] = addr;
3084 return 0;
3085}
3086
3087int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3088 unsigned int *sg_offset)
3089{
3090 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3091
3092 mr->npages = 0;
3093 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3094}
3095
3096struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3097 u32 max_num_sg)
3098{
3099 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3100 struct bnxt_re_dev *rdev = pd->rdev;
3101 struct bnxt_re_mr *mr = NULL;
3102 int rc;
3103
3104 if (type != IB_MR_TYPE_MEM_REG) {
3105 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3106 return ERR_PTR(-EINVAL);
3107 }
3108 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3109 return ERR_PTR(-EINVAL);
3110
3111 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3112 if (!mr)
3113 return ERR_PTR(-ENOMEM);
3114
3115 mr->rdev = rdev;
3116 mr->qplib_mr.pd = &pd->qplib_pd;
3117 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3118 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3119
3120 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3121 if (rc)
3122 goto fail;
3123
3124 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3125 mr->ib_mr.rkey = mr->ib_mr.lkey;
3126
3127 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3128 if (!mr->pages) {
3129 rc = -ENOMEM;
3130 goto fail;
3131 }
3132 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3133 &mr->qplib_frpl, max_num_sg);
3134 if (rc) {
3135 dev_err(rdev_to_dev(rdev),
3136 "Failed to allocate HW FR page list");
3137 goto fail_mr;
3138 }
3139
3140 atomic_inc(&rdev->mr_count);
3141 return &mr->ib_mr;
3142
3143fail_mr:
3144 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3145fail:
3146 kfree(mr->pages);
3147 kfree(mr);
3148 return ERR_PTR(rc);
3149}
3150
Eddie Wai9152e0b2017-06-14 03:26:23 -07003151struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3152 struct ib_udata *udata)
3153{
3154 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3155 struct bnxt_re_dev *rdev = pd->rdev;
3156 struct bnxt_re_mw *mw;
3157 int rc;
3158
3159 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3160 if (!mw)
3161 return ERR_PTR(-ENOMEM);
3162 mw->rdev = rdev;
3163 mw->qplib_mw.pd = &pd->qplib_pd;
3164
3165 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3166 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3167 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3168 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3169 if (rc) {
3170 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3171 goto fail;
3172 }
3173 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3174
3175 atomic_inc(&rdev->mw_count);
3176 return &mw->ib_mw;
3177
3178fail:
3179 kfree(mw);
3180 return ERR_PTR(rc);
3181}
3182
3183int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3184{
3185 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3186 struct bnxt_re_dev *rdev = mw->rdev;
3187 int rc;
3188
3189 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3190 if (rc) {
3191 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3192 return rc;
3193 }
3194
3195 kfree(mw);
3196 atomic_dec(&rdev->mw_count);
3197 return rc;
3198}
3199
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003200/* Fast Memory Regions */
3201struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
3202 struct ib_fmr_attr *fmr_attr)
3203{
3204 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3205 struct bnxt_re_dev *rdev = pd->rdev;
3206 struct bnxt_re_fmr *fmr;
3207 int rc;
3208
3209 if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
3210 fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
3211 dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
3212 return ERR_PTR(-ENOMEM);
3213 }
3214 fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
3215 if (!fmr)
3216 return ERR_PTR(-ENOMEM);
3217
3218 fmr->rdev = rdev;
3219 fmr->qplib_fmr.pd = &pd->qplib_pd;
3220 fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3221
3222 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
3223 if (rc)
3224 goto fail;
3225
3226 fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
3227 fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
3228 fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
3229
3230 atomic_inc(&rdev->mr_count);
3231 return &fmr->ib_fmr;
3232fail:
3233 kfree(fmr);
3234 return ERR_PTR(rc);
3235}
3236
3237int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
3238 u64 iova)
3239{
3240 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
3241 ib_fmr);
3242 struct bnxt_re_dev *rdev = fmr->rdev;
3243 int rc;
3244
3245 fmr->qplib_fmr.va = iova;
3246 fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
3247
3248 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
3249 list_len, true);
3250 if (rc)
3251 dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
3252 fmr->ib_fmr.lkey);
3253 return rc;
3254}
3255
3256int bnxt_re_unmap_fmr(struct list_head *fmr_list)
3257{
3258 struct bnxt_re_dev *rdev;
3259 struct bnxt_re_fmr *fmr;
3260 struct ib_fmr *ib_fmr;
3261 int rc = 0;
3262
3263 /* Validate each FMRs inside the fmr_list */
3264 list_for_each_entry(ib_fmr, fmr_list, list) {
3265 fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
3266 rdev = fmr->rdev;
3267
3268 if (rdev) {
3269 rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
3270 &fmr->qplib_fmr, true);
3271 if (rc)
3272 break;
3273 }
3274 }
3275 return rc;
3276}
3277
3278int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
3279{
3280 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
3281 ib_fmr);
3282 struct bnxt_re_dev *rdev = fmr->rdev;
3283 int rc;
3284
3285 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
3286 if (rc)
3287 dev_err(rdev_to_dev(rdev), "Failed to free FMR");
3288
3289 kfree(fmr);
3290 atomic_dec(&rdev->mr_count);
3291 return rc;
3292}
3293
3294/* uverbs */
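/* Register a user MR: the buffer is pinned with ib_umem_get(), a flat
 * array of page DMA addresses is built from the umem scatterlist and
 * passed to bnxt_qplib_reg_mr(); hugetlb mappings and umems whose page
 * shift differs from PAGE_SHIFT are rejected.
 */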
3295struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3296 u64 virt_addr, int mr_access_flags,
3297 struct ib_udata *udata)
3298{
3299 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3300 struct bnxt_re_dev *rdev = pd->rdev;
3301 struct bnxt_re_mr *mr;
3302 struct ib_umem *umem;
3303 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003304 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003305 struct scatterlist *sg;
3306 int entry;
3307
3308 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3309 if (!mr)
3310 return ERR_PTR(-ENOMEM);
3311
3312 mr->rdev = rdev;
3313 mr->qplib_mr.pd = &pd->qplib_pd;
3314 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3315 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3316
3317 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3318 mr_access_flags, 0);
3319 if (IS_ERR(umem)) {
3320 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3321 rc = -EFAULT;
3322 goto free_mr;
3323 }
3324 mr->ib_umem = umem;
3325
3326 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3327 if (rc) {
3328 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3329 goto release_umem;
3330 }
3331 /* The fixed portion of the rkey is the same as the lkey */
3332 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3333
3334 mr->qplib_mr.va = virt_addr;
3335 umem_pgs = ib_umem_page_count(umem);
3336 if (!umem_pgs) {
3337 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3338 rc = -EINVAL;
3339 goto free_mrw;
3340 }
3341 mr->qplib_mr.total_size = length;
3342
3343 pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3344 if (!pbl_tbl) {
3345 rc = -EINVAL;
3346 goto free_mrw;
3347 }
3348 pbl_tbl_orig = pbl_tbl;
3349
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003350 if (umem->hugetlb) {
3351 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3352 rc = -EFAULT;
3353 goto fail;
3354 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003355
3356 if (umem->page_shift != PAGE_SHIFT) {
3357 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003358 rc = -EFAULT;
3359 goto fail;
3360 }
3361 /* Map umem buf ptrs to the PBL */
3362 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003363 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003364 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003365 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003366 }
3367 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3368 umem_pgs, false);
3369 if (rc) {
3370 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3371 goto fail;
3372 }
3373
3374 kfree(pbl_tbl_orig);
3375
3376 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3377 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3378 atomic_inc(&rdev->mr_count);
3379
3380 return &mr->ib_mr;
3381fail:
3382 kfree(pbl_tbl_orig);
3383free_mrw:
3384 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3385release_umem:
3386 ib_umem_release(umem);
3387free_mr:
3388 kfree(mr);
3389 return ERR_PTR(rc);
3390}
3391
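/* Allocate a user context: the uverbs ABI version is checked, the shared
 * page that user space later maps through bnxt_re_mmap() (vm_pgoff == 0)
 * is allocated, and the device limits are returned in the ABI response.
 */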
3392struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3393 struct ib_udata *udata)
3394{
3395 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3396 struct bnxt_re_uctx_resp resp;
3397 struct bnxt_re_ucontext *uctx;
3398 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3399 int rc;
3400
3401 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3402 ibdev->uverbs_abi_ver);
3403
3404 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3405 dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3406 BNXT_RE_ABI_VERSION);
3407 return ERR_PTR(-EPERM);
3408 }
3409
3410 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3411 if (!uctx)
3412 return ERR_PTR(-ENOMEM);
3413
3414 uctx->rdev = rdev;
3415
3416 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3417 if (!uctx->shpg) {
3418 rc = -ENOMEM;
3419 goto fail;
3420 }
3421 spin_lock_init(&uctx->sh_lock);
3422
 3423	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3424 resp.max_qp = rdev->qplib_ctx.qpc_count;
3425 resp.pg_size = PAGE_SIZE;
3426 resp.cqe_sz = sizeof(struct cq_base);
3427 resp.max_cqd = dev_attr->max_cq_wqes;
3428 resp.rsvd = 0;
3429
3430 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3431 if (rc) {
3432 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3433 rc = -EFAULT;
3434 goto cfail;
3435 }
3436
3437 return &uctx->ib_uctx;
3438cfail:
3439 free_page((unsigned long)uctx->shpg);
3440 uctx->shpg = NULL;
3441fail:
3442 kfree(uctx);
3443 return ERR_PTR(rc);
3444}
3445
3446int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3447{
3448 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3449 struct bnxt_re_ucontext,
3450 ib_uctx);
3451 if (uctx->shpg)
3452 free_page((unsigned long)uctx->shpg);
3453 kfree(uctx);
3454 return 0;
3455}
3456
3457/* Helper function to mmap the virtual memory from user app */
3458int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3459{
3460 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3461 struct bnxt_re_ucontext,
3462 ib_uctx);
3463 struct bnxt_re_dev *rdev = uctx->rdev;
3464 u64 pfn;
3465
3466 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3467 return -EINVAL;
3468
3469 if (vma->vm_pgoff) {
3470 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3471 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3472 PAGE_SIZE, vma->vm_page_prot)) {
3473 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3474 return -EAGAIN;
3475 }
3476 } else {
3477 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3478 if (remap_pfn_range(vma, vma->vm_start,
3479 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3480 dev_err(rdev_to_dev(rdev),
3481 "Failed to map shared page");
3482 return -EAGAIN;
3483 }
3484 }
3485
3486 return 0;
3487}