/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
                             struct bnxt_qplib_sge *sg_list, int num)
{
        int i, total = 0;

        for (i = 0; i < num; i++) {
                sg_list[i].addr = ib_sg_list[i].addr;
                sg_list[i].lkey = ib_sg_list[i].lkey;
                sg_list[i].size = ib_sg_list[i].length;
                total += sg_list[i].size;
        }
        return total;
}

/* Device */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct net_device *netdev = NULL;

        rcu_read_lock();
        if (rdev)
                netdev = rdev->netdev;
        if (netdev)
                dev_hold(netdev);

        rcu_read_unlock();
        return netdev;
}

int bnxt_re_query_device(struct ib_device *ibdev,
                         struct ib_device_attr *ib_attr,
                         struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(ib_attr, 0, sizeof(*ib_attr));

        ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
        bnxt_qplib_get_guid(rdev->netdev->dev_addr,
                            (u8 *)&ib_attr->sys_image_guid);
        ib_attr->max_mr_size = ~0ull;
        ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
                                 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
                                 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;

        ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
        ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
        ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
        ib_attr->max_qp = dev_attr->max_qp;
        ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
        ib_attr->device_cap_flags =
                                    IB_DEVICE_CURR_QP_STATE_MOD
                                    | IB_DEVICE_RC_RNR_NAK_GEN
                                    | IB_DEVICE_SHUTDOWN_PORT
                                    | IB_DEVICE_SYS_IMAGE_GUID
                                    | IB_DEVICE_LOCAL_DMA_LKEY
                                    | IB_DEVICE_RESIZE_MAX_WR
                                    | IB_DEVICE_PORT_ACTIVE_EVENT
                                    | IB_DEVICE_N_NOTIFY_CQ
                                    | IB_DEVICE_MEM_WINDOW
                                    | IB_DEVICE_MEM_WINDOW_TYPE_2B
                                    | IB_DEVICE_MEM_MGT_EXTENSIONS;
        ib_attr->max_sge = dev_attr->max_qp_sges;
        ib_attr->max_sge_rd = dev_attr->max_qp_sges;
        ib_attr->max_cq = dev_attr->max_cq;
        ib_attr->max_cqe = dev_attr->max_cq_wqes;
        ib_attr->max_mr = dev_attr->max_mr;
        ib_attr->max_pd = dev_attr->max_pd;
        ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->atomic_cap = IB_ATOMIC_HCA;
        ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;

        ib_attr->max_ee_rd_atom = 0;
        ib_attr->max_res_rd_atom = 0;
        ib_attr->max_ee_init_rd_atom = 0;
        ib_attr->max_ee = 0;
        ib_attr->max_rdd = 0;
        ib_attr->max_mw = dev_attr->max_mw;
        ib_attr->max_raw_ipv6_qp = 0;
        ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
        ib_attr->max_mcast_grp = 0;
        ib_attr->max_mcast_qp_attach = 0;
        ib_attr->max_total_mcast_qp_attach = 0;
        ib_attr->max_ah = dev_attr->max_ah;

        ib_attr->max_fmr = dev_attr->max_fmr;
        ib_attr->max_map_per_fmr = 1;   /* ? */

        ib_attr->max_srq = dev_attr->max_srq;
        ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
        ib_attr->max_srq_sge = dev_attr->max_srq_sges;

        ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

        ib_attr->max_pkeys = 1;
        ib_attr->local_ca_ack_delay = 0;
        return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
                          int device_modify_mask,
                          struct ib_device_modify *device_modify)
{
        switch (device_modify_mask) {
        case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
                /* Modifying the GUID requires modifying the GID table */
                /* GUID should be made READ-ONLY */
                break;
        case IB_DEVICE_MODIFY_NODE_DESC:
                /* Node Desc should be made READ-ONLY */
                break;
        default:
                break;
        }
        return 0;
}

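/*
 * Map the netdev's Ethernet link speed onto the closest IB speed/width
 * pair (e.g. 10G is reported as QDR x1, 40G as QDR x4). Note that for
 * SPEED_50000 the switch below leaves *speed and *width untouched, so
 * callers see whatever values were already there.
 */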
static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
{
        struct ethtool_link_ksettings lksettings;
        u32 espeed;

        if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
                memset(&lksettings, 0, sizeof(lksettings));
                rtnl_lock();
                netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
                rtnl_unlock();
                espeed = lksettings.base.speed;
        } else {
                espeed = SPEED_UNKNOWN;
        }
        switch (espeed) {
        case SPEED_1000:
                *speed = IB_SPEED_SDR;
                *width = IB_WIDTH_1X;
                break;
        case SPEED_10000:
                *speed = IB_SPEED_QDR;
                *width = IB_WIDTH_1X;
                break;
        case SPEED_20000:
                *speed = IB_SPEED_DDR;
                *width = IB_WIDTH_4X;
                break;
        case SPEED_25000:
                *speed = IB_SPEED_EDR;
                *width = IB_WIDTH_1X;
                break;
        case SPEED_40000:
                *speed = IB_SPEED_QDR;
                *width = IB_WIDTH_4X;
                break;
        case SPEED_50000:
                break;
        default:
                *speed = IB_SPEED_SDR;
                *width = IB_WIDTH_1X;
                break;
        }
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
                       struct ib_port_attr *port_attr)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(port_attr, 0, sizeof(*port_attr));

        if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
                port_attr->state = IB_PORT_ACTIVE;
                port_attr->phys_state = 5;
        } else {
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = 3;
        }
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
        port_attr->gid_tbl_len = dev_attr->max_sgid;
        port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                    IB_PORT_DEVICE_MGMT_SUP |
                                    IB_PORT_VENDOR_CLASS_SUP |
                                    IB_PORT_IP_BASED_GIDS;

        /* Max MSG size set to 2G for now */
        port_attr->max_msg_sz = 0x80000000;
        port_attr->bad_pkey_cntr = 0;
        port_attr->qkey_viol_cntr = 0;
        port_attr->pkey_tbl_len = dev_attr->max_pkey;
        port_attr->lid = 0;
        port_attr->sm_lid = 0;
        port_attr->lmc = 0;
        port_attr->max_vl_num = 4;
        port_attr->sm_sl = 0;
        port_attr->subnet_timeout = 0;
        port_attr->init_type_reply = 0;
        /* Query the speed settings via the underlying netdev's ethtool
         * hooks, for which we acquire rtnl_lock _only_ while the device
         * is registered with the IB stack, to avoid racing with the
         * NETDEV_UNREG path.
         */
        if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
                __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
                                    &port_attr->active_width);
        return 0;
}

int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
                        int port_modify_mask,
                        struct ib_port_modify *port_modify)
{
        switch (port_modify_mask) {
        case IB_PORT_SHUTDOWN:
                break;
        case IB_PORT_INIT_TYPE:
                break;
        case IB_PORT_RESET_QKEY_CNTR:
                break;
        default:
                break;
        }
        return 0;
}

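/*
 * Advertise the immutable port capabilities. RDMA_CORE_PORT_IBA_ROCE
 * combined with RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP marks the port as
 * supporting both RoCE v1 and RoCE v2 GID types.
 */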
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr port_attr;

        if (bnxt_re_query_port(ibdev, port_num, &port_attr))
                return -EINVAL;

        immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
        immutable->gid_tbl_len = port_attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
        immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        return 0;
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
                       u16 index, u16 *pkey)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        /* Ignore port_num */

        memset(pkey, 0, sizeof(*pkey));
        return bnxt_qplib_get_pkey(&rdev->qplib_res,
                                   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
                      int index, union ib_gid *gid)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        int rc = 0;

        /* Ignore port_num */
        memset(gid, 0, sizeof(*gid));
        rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
                                 &rdev->qplib_res.sgid_tbl, index,
                                 (struct bnxt_qplib_gid *)gid);
        return rc;
}

int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
                    unsigned int index, void **context)
{
        int rc = 0;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

        /* Delete the entry from the hardware */
        ctx = *context;
        if (!ctx)
                return -EINVAL;

        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
                ctx->refcnt--;
                if (!ctx->refcnt) {
                        rc = bnxt_qplib_del_sgid(sgid_tbl,
                                                 &sgid_tbl->tbl[ctx->idx],
                                                 true);
                        if (rc)
                                dev_err(rdev_to_dev(rdev),
                                        "Failed to remove GID: %#x", rc);
                        ctx_tbl = sgid_tbl->ctx;
                        ctx_tbl[ctx->idx] = NULL;
                        kfree(ctx);
                }
        } else {
                return -EINVAL;
        }
        return rc;
}

int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
                    unsigned int index, const union ib_gid *gid,
                    const struct ib_gid_attr *attr, void **context)
{
        int rc;
        u32 tbl_idx = 0;
        u16 vlan_id = 0xFFFF;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

        if ((attr->ndev) && is_vlan_dev(attr->ndev))
                vlan_id = vlan_dev_vlan_id(attr->ndev);

        rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
                                 rdev->qplib_res.netdev->dev_addr,
                                 vlan_id, true, &tbl_idx);
        if (rc == -EALREADY) {
                ctx_tbl = sgid_tbl->ctx;
                ctx_tbl[tbl_idx]->refcnt++;
                *context = ctx_tbl[tbl_idx];
                return 0;
        }

        if (rc < 0) {
                dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
                return rc;
        }

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx_tbl = sgid_tbl->ctx;
        ctx->idx = tbl_idx;
        ctx->refcnt = 1;
        ctx_tbl[tbl_idx] = ctx;

        return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
                                            u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        int rc;

        if (ib_pd->uobject && pd->dpi.dbr) {
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *ucntx;

                /* Free the DPI only if this is the first PD allocated by
                 * the application, and mark the context DPI as NULL.
                 */
                ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);

                rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
                                            &rdev->qplib_res.dpi_tbl,
                                            &pd->dpi);
                if (rc)
                        dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
                        /* Don't fail, continue */
                ucntx->dpi = NULL;
        }

        rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
                                   &rdev->qplib_res.pd_tbl,
                                   &pd->qplib_pd);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
                return rc;
        }

        kfree(pd);
        return 0;
}

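/*
 * A user context owns a single doorbell page (DPI): it is allocated
 * below when the context's first PD is created, cached in the
 * ucontext, and reused by every subsequent PD. bnxt_re_dealloc_pd()
 * frees it along with the PD that owns it.
 */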
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
                               struct ib_ucontext *ucontext,
                               struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_re_ucontext *ucntx = container_of(ucontext,
                                                      struct bnxt_re_ucontext,
                                                      ib_uctx);
        struct bnxt_re_pd *pd;
        int rc;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->rdev = rdev;
        if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
                rc = -ENOMEM;
                goto fail;
        }

        if (udata) {
                struct bnxt_re_pd_resp resp;

                if (!ucntx->dpi) {
                        /* Allocate the DPI here, in alloc_pd, so that
                         * ibv_devinfo and similar applications do not
                         * fail when DPIs are depleted.
                         */
                        if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
                                                 &pd->dpi, ucntx)) {
                                rc = -ENOMEM;
                                goto dbfail;
                        }
                        ucntx->dpi = &pd->dpi;
                }

                resp.pdid = pd->qplib_pd.id;
                /* Still allow mapping this DBR to the new user PD. */
                resp.dpi = ucntx->dpi->dpi;
                resp.dbr = (u64)ucntx->dpi->umdbr;

                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to copy user response\n");
                        goto dbfail;
                }
        }

        return &pd->ib_pd;
dbfail:
        (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
                                    &pd->qplib_pd);
fail:
        kfree(pd);
        return ERR_PTR(rc);
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
        struct bnxt_re_dev *rdev = ah->rdev;
        int rc;

        rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
                return rc;
        }
        kfree(ah);
        return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
                                struct rdma_ah_attr *ah_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
        int rc;
        u16 vlan_tag;
        u8 nw_type;

        struct ib_gid_attr sgid_attr;

        if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
                dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
                return ERR_PTR(-EINVAL);
        }
        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        /* Supply the configuration for the HW */
        memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
               sizeof(union ib_gid));
        /*
         * If RoCE v2 is enabled, the stack holds two entries (one per
         * GID type) for each GID. Avoid duplicating the entry in HW by
         * dividing the host GID index by 2; e.g. host indices 2 and 3
         * both map to HW index 1.
         */
        ah->qplib_ah.sgid_index = grh->sgid_index / 2;
        ah->qplib_ah.host_sgid_index = grh->sgid_index;
        ah->qplib_ah.traffic_class = grh->traffic_class;
        ah->qplib_ah.flow_label = grh->flow_label;
        ah->qplib_ah.hop_limit = grh->hop_limit;
        ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
        if (ib_pd->uobject &&
            !rdma_is_multicast_addr((struct in6_addr *)
                                    grh->dgid.raw) &&
            !rdma_link_local_addr((struct in6_addr *)
                                  grh->dgid.raw)) {
                union ib_gid sgid;

                rc = ib_get_cached_gid(&rdev->ibdev, 1,
                                       grh->sgid_index, &sgid,
                                       &sgid_attr);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to query gid at index %d",
                                grh->sgid_index);
                        goto fail;
                }
                if (sgid_attr.ndev) {
                        if (is_vlan_dev(sgid_attr.ndev))
                                vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
                        dev_put(sgid_attr.ndev);
                }
                /* Get network header type for this GID */
                nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
                switch (nw_type) {
                case RDMA_NETWORK_IPV4:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
                        break;
                case RDMA_NETWORK_IPV6:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
                        break;
                default:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
                        break;
                }
                rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
                                                  ah_attr->dmac, &vlan_tag,
                                                  &sgid_attr.ndev->ifindex,
                                                  NULL);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
                        goto fail;
                }
        }

        memcpy(ah->qplib_ah.dmac, ah_attr->dmac, ETH_ALEN);
        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
                goto fail;
        }

        /* Write AVID to shared page. */
        if (ib_pd->uobject) {
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *uctx;
                unsigned long flag;
                u32 *wrptr;

                uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
                spin_lock_irqsave(&uctx->sh_lock, flag);
                wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
                *wrptr = ah->qplib_ah.id;
                wmb();  /* make sure cache is updated. */
                spin_unlock_irqrestore(&uctx->sh_lock, flag);
        }

        return &ah->ib_ah;

fail:
        kfree(ah);
        return ERR_PTR(rc);
}

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

        rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
        memcpy(ah_attr->dmac, ah->qplib_ah.dmac, ETH_ALEN);
        rdma_ah_set_grh(ah_attr, NULL, 0,
                        ah->qplib_ah.host_sgid_index,
                        0, ah->qplib_ah.traffic_class);
        rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
        rdma_ah_set_port_num(ah_attr, 1);
        rdma_ah_set_static_rate(ah_attr, 0);
        return 0;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        int rc;

        rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
                return rc;
        }
        if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
                rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
                                           &rdev->sqp_ah->qplib_ah);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy HW AH for shadow QP");
                        return rc;
                }

                rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                           &rdev->qp1_sqp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy Shadow QP");
                        return rc;
                }
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
                mutex_unlock(&rdev->qp_lock);

                kfree(rdev->sqp_ah);
                kfree(rdev->qp1_sqp);
        }

        if (!IS_ERR_OR_NULL(qp->rumem))
                ib_umem_release(qp->rumem);
        if (!IS_ERR_OR_NULL(qp->sumem))
                ib_umem_release(qp->sumem);

        mutex_lock(&rdev->qp_lock);
        list_del(&qp->list);
        atomic_dec(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        kfree(qp);
        return 0;
}

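/*
 * Translate the IB QP type to the firmware CMDQ encoding. IB_QPT_MAX is
 * returned as a sentinel for unsupported types; bnxt_re_create_qp()
 * checks for it and fails the create.
 */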
static u8 __from_ib_qp_type(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_GSI:
                return CMDQ_CREATE_QP1_TYPE_GSI;
        case IB_QPT_RC:
                return CMDQ_CREATE_QP_TYPE_RC;
        case IB_QPT_UD:
                return CMDQ_CREATE_QP_TYPE_UD;
        default:
                return IB_QPT_MAX;
        }
}

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                                struct bnxt_re_qp *qp, struct ib_udata *udata)
{
        struct bnxt_re_qp_req ureq;
        struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
        struct ib_umem *umem;
        int bytes = 0;
        struct ib_ucontext *context = pd->ib_pd.uobject->context;
        struct bnxt_re_ucontext *cntx = container_of(context,
                                                     struct bnxt_re_ucontext,
                                                     ib_uctx);
        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                return -EFAULT;

        bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
        /* Consider mapping PSN search memory only for RC QPs. */
        if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
                bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
        bytes = PAGE_ALIGN(bytes);
        umem = ib_umem_get(context, ureq.qpsva, bytes,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem))
                return PTR_ERR(umem);

        qp->sumem = umem;
        qplib_qp->sq.sglist = umem->sg_head.sgl;
        qplib_qp->sq.nmap = umem->nmap;
        qplib_qp->qp_handle = ureq.qp_handle;

        if (!qp->qplib_qp.srq) {
                bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
                bytes = PAGE_ALIGN(bytes);
                umem = ib_umem_get(context, ureq.qprva, bytes,
                                   IB_ACCESS_LOCAL_WRITE, 1);
                if (IS_ERR(umem))
                        goto rqfail;
                qp->rumem = umem;
                qplib_qp->rq.sglist = umem->sg_head.sgl;
                qplib_qp->rq.nmap = umem->nmap;
        }

        qplib_qp->dpi = cntx->dpi;
        return 0;
rqfail:
        ib_umem_release(qp->sumem);
        qp->sumem = NULL;
        qplib_qp->sq.sglist = NULL;
        qplib_qp->sq.nmap = 0;

        return PTR_ERR(umem);
}

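/*
 * The shadow QP's AH simply loops packets back to the local port: the
 * DGID is set to the local SGID (index 0) and the DMAC to the netdev's
 * own MAC address.
 */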
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        union ib_gid sgid;
        int rc;

        ah = kzalloc(sizeof(*ah), GFP_KERNEL);
        if (!ah)
                return NULL;

        memset(ah, 0, sizeof(*ah));
        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
        if (rc)
                goto fail;

        /* supply the dgid data same as sgid */
        memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
               sizeof(union ib_gid));
        ah->qplib_ah.sgid_index = 0;

        ah->qplib_ah.traffic_class = 0;
        ah->qplib_ah.flow_label = 0;
        ah->qplib_ah.hop_limit = 1;
        ah->qplib_ah.sl = 0;
        /* Have DMAC same as SMAC */
        ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev),
                        "Failed to allocate HW AH for Shadow QP");
                goto fail;
        }

        return ah;

fail:
        kfree(ah);
        return NULL;
}

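/*
 * A shadow UD QP is created alongside QP1 to handle the GSI traffic:
 * its SQ depth mirrors the QP1 RQ depth (see the sizing below) and it
 * shares QP1's send and receive CQs.
 */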
static struct bnxt_re_qp *bnxt_re_create_shadow_qp
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_qp *qp;
        int rc;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        memset(qp, 0, sizeof(*qp));
        qp->rdev = rdev;

        /* Initialize the shadow QP structure from the QP1 values */
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = IB_QPT_UD;

        qp->qplib_qp.max_inline_data = 0;
        qp->qplib_qp.sig_type = true;

        /* Shadow QP SQ depth should be same as QP1 RQ depth */
        qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.sq.max_sge = 2;

        qp->qplib_qp.scq = qp1_qp->scq;
        qp->qplib_qp.rcq = qp1_qp->rcq;

        qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;

        qp->qplib_qp.mtu = qp1_qp->mtu;

        qp->qplib_qp.sq_hdr_buf_size = 0;
        qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
        qp->qplib_qp.dpi = &rdev->dpi_privileged;

        rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
        if (rc)
                goto fail;

        rdev->sqp_id = qp->qplib_qp.id;

        spin_lock_init(&qp->sq_lock);
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        return qp;
fail:
        kfree(qp);
        return NULL;
}

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                                struct ib_qp_init_attr *qp_init_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
        struct bnxt_re_qp *qp;
        struct bnxt_re_cq *cq;
        int rc, entries;

        if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
                return ERR_PTR(-EINVAL);

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->rdev = rdev;
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
        if (qp->qplib_qp.type == IB_QPT_MAX) {
                dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
                        qp->qplib_qp.type);
                rc = -EINVAL;
                goto fail;
        }
        qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
        qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
                                  IB_SIGNAL_ALL_WR) ? true : false);

        entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
        qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                        dev_attr->max_qp_wqes + 1);

        qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
        if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

        if (qp_init_attr->send_cq) {
                cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Send CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.scq = &cq->qplib_cq;
        }

        if (qp_init_attr->recv_cq) {
                cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Receive CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.rcq = &cq->qplib_cq;
        }

        if (qp_init_attr->srq) {
                dev_err(rdev_to_dev(rdev), "SRQ not supported");
                rc = -ENOTSUPP;
                goto fail;
        } else {
                /* Allocate 1 more than what's provided so posting max doesn't
                 * mean empty
                 */
                entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
                qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);

                qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
        }

        qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

        if (qp_init_attr->qp_type == IB_QPT_GSI) {
                qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                qp->qplib_qp.sq.max_sge++;
                if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

                qp->qplib_qp.rq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

                qp->qplib_qp.sq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
                qp->qplib_qp.dpi = &rdev->dpi_privileged;
                rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
                        goto fail;
                }
                /* Create a shadow QP to handle the QP1 traffic */
                rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
                                                         &qp->qplib_qp);
                if (!rdev->qp1_sqp) {
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create Shadow QP for QP1");
                        goto qp_destroy;
                }
                rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
                                                           &qp->qplib_qp);
                if (!rdev->sqp_ah) {
                        bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                              &rdev->qp1_sqp->qplib_qp);
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create AH entry for ShadowQP");
                        goto qp_destroy;
                }

        } else {
                qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
                qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
                if (udata) {
                        rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
                        if (rc)
                                goto fail;
                } else {
                        qp->qplib_qp.dpi = &rdev->dpi_privileged;
                }

                rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
                        goto fail;
                }
        }

        qp->ib_qp.qp_num = qp->qplib_qp.id;
        spin_lock_init(&qp->sq_lock);

        if (udata) {
                struct bnxt_re_qp_resp resp;

                resp.qpid = qp->ib_qp.qp_num;
                resp.rsvd = 0;
                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
                        goto qp_destroy;
                }
        }
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);

        return &qp->ib_qp;
qp_destroy:
        bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
fail:
        kfree(qp);
        return ERR_PTR(rc);
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return CMDQ_MODIFY_QP_NEW_STATE_RESET;
        case IB_QPS_INIT:
                return CMDQ_MODIFY_QP_NEW_STATE_INIT;
        case IB_QPS_RTR:
                return CMDQ_MODIFY_QP_NEW_STATE_RTR;
        case IB_QPS_RTS:
                return CMDQ_MODIFY_QP_NEW_STATE_RTS;
        case IB_QPS_SQD:
                return CMDQ_MODIFY_QP_NEW_STATE_SQD;
        case IB_QPS_SQE:
                return CMDQ_MODIFY_QP_NEW_STATE_SQE;
        case IB_QPS_ERR:
        default:
                return CMDQ_MODIFY_QP_NEW_STATE_ERR;
        }
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
        switch (state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RESET:
                return IB_QPS_RESET;
        case CMDQ_MODIFY_QP_NEW_STATE_INIT:
                return IB_QPS_INIT;
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                return IB_QPS_RTR;
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                return IB_QPS_RTS;
        case CMDQ_MODIFY_QP_NEW_STATE_SQD:
                return IB_QPS_SQD;
        case CMDQ_MODIFY_QP_NEW_STATE_SQE:
                return IB_QPS_SQE;
        case CMDQ_MODIFY_QP_NEW_STATE_ERR:
        default:
                return IB_QPS_ERR;
        }
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
        case IB_MTU_512:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
        case IB_MTU_1024:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
        case IB_MTU_2048:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        case IB_MTU_4096:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
        default:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        }
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
        switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
                return IB_MTU_256;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
                return IB_MTU_512;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
                return IB_MTU_1024;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
                return IB_MTU_2048;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
                return IB_MTU_4096;
        default:
                return IB_MTU_2048;
        }
}

static int __from_ib_access_flags(int iflags)
{
        int qflags = 0;

        if (iflags & IB_ACCESS_LOCAL_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
        if (iflags & IB_ACCESS_REMOTE_READ)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
        if (iflags & IB_ACCESS_REMOTE_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
        if (iflags & IB_ACCESS_REMOTE_ATOMIC)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
        if (iflags & IB_ACCESS_MW_BIND)
                qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
        if (iflags & IB_ZERO_BASED)
                qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
        if (iflags & IB_ACCESS_ON_DEMAND)
                qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
        return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
        enum ib_access_flags iflags = 0;

        if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
                iflags |= IB_ACCESS_LOCAL_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
                iflags |= IB_ACCESS_REMOTE_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
                iflags |= IB_ACCESS_REMOTE_READ;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
                iflags |= IB_ACCESS_REMOTE_ATOMIC;
        if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
                iflags |= IB_ACCESS_MW_BIND;
        if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
                iflags |= IB_ZERO_BASED;
        if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
                iflags |= IB_ACCESS_ON_DEMAND;
        return iflags;
}

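/*
 * Keep the shadow QP in lockstep with QP1: whenever QP1's state, P_Key
 * index, Q_Key or SQ PSN is modified, mirror the change onto the
 * shadow QP (which uses the driver's own fixed Q_Key).
 */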
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
                                    struct bnxt_re_qp *qp1_qp,
                                    int qp_attr_mask)
{
        struct bnxt_re_qp *qp = rdev->qp1_sqp;
        int rc = 0;

        if (qp_attr_mask & IB_QP_STATE) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
                qp->qplib_qp.state = qp1_qp->qplib_qp.state;
        }
        if (qp_attr_mask & IB_QP_PKEY_INDEX) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
                qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
        }

        if (qp_attr_mask & IB_QP_QKEY) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
                /* Using a hardcoded QKEY */
                qp->qplib_qp.qkey = 0x81818181;
        }
        if (qp_attr_mask & IB_QP_SQ_PSN) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
                qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
        }

        rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to modify Shadow QP for QP1");
        return rc;
}

int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                      int qp_attr_mask, struct ib_udata *udata)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
        enum ib_qp_state curr_qp_state, new_qp_state;
        int rc, entries;
        int status;
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;
        u8 nw_type;

        qp->qplib_qp.modify_flags = 0;
        if (qp_attr_mask & IB_QP_STATE) {
                curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
                new_qp_state = qp_attr->qp_state;
                if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
                                        ib_qp->qp_type, qp_attr_mask,
                                        IB_LINK_LAYER_ETHERNET)) {
                        dev_err(rdev_to_dev(rdev),
                                "Invalid attribute mask: %#x specified ",
                                qp_attr_mask);
                        dev_err(rdev_to_dev(rdev),
                                "for qpn: %#x type: %#x",
                                ib_qp->qp_num, ib_qp->qp_type);
                        dev_err(rdev_to_dev(rdev),
                                "curr_qp_state=0x%x, new_qp_state=0x%x\n",
                                curr_qp_state, new_qp_state);
                        return -EINVAL;
                }
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
                qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
        }
        if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
                qp->qplib_qp.en_sqd_async_notify = true;
        }
        if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
                qp->qplib_qp.access =
                        __from_ib_access_flags(qp_attr->qp_access_flags);
                /* LOCAL_WRITE access must be set to allow RC receive */
                qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
        }
        if (qp_attr_mask & IB_QP_PKEY_INDEX) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
                qp->qplib_qp.pkey_index = qp_attr->pkey_index;
        }
        if (qp_attr_mask & IB_QP_QKEY) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
                qp->qplib_qp.qkey = qp_attr->qkey;
        }
        if (qp_attr_mask & IB_QP_AV) {
                const struct ib_global_route *grh =
                        rdma_ah_read_grh(&qp_attr->ah_attr);

                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
                memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
                       sizeof(qp->qplib_qp.ah.dgid.data));
                qp->qplib_qp.ah.flow_label = grh->flow_label;
                /* If RoCE v2 is enabled, the stack holds two entries (one
                 * per GID type) for each GID. Avoid duplicating the entry
                 * in HW by dividing the host GID index by 2.
                 */
                qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
                qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
                qp->qplib_qp.ah.hop_limit = grh->hop_limit;
                qp->qplib_qp.ah.traffic_class = grh->traffic_class;
                qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
                ether_addr_copy(qp->qplib_qp.ah.dmac, qp_attr->ah_attr.dmac);

                status = ib_get_cached_gid(&rdev->ibdev, 1,
                                           grh->sgid_index,
                                           &sgid, &sgid_attr);
                if (!status && sgid_attr.ndev) {
                        memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
                               ETH_ALEN);
                        dev_put(sgid_attr.ndev);
                        nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
                                                         &sgid);
                        switch (nw_type) {
                        case RDMA_NETWORK_IPV4:
                                qp->qplib_qp.nw_type =
                                        CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
                                break;
                        case RDMA_NETWORK_IPV6:
                                qp->qplib_qp.nw_type =
                                        CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
                                break;
                        default:
                                qp->qplib_qp.nw_type =
                                        CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
                                break;
                        }
                }
        }

        if (qp_attr_mask & IB_QP_PATH_MTU) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
                qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
        } else if (qp_attr->qp_state == IB_QPS_RTR) {
                qp->qplib_qp.modify_flags |=
                        CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
                qp->qplib_qp.path_mtu =
                        __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
        }

        if (qp_attr_mask & IB_QP_TIMEOUT) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
                qp->qplib_qp.timeout = qp_attr->timeout;
        }
        if (qp_attr_mask & IB_QP_RETRY_CNT) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
                qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
        }
        if (qp_attr_mask & IB_QP_RNR_RETRY) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
                qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
        }
        if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
                qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
        }
        if (qp_attr_mask & IB_QP_RQ_PSN) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
                qp->qplib_qp.rq.psn = qp_attr->rq_psn;
        }
        if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
                qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
        }
        if (qp_attr_mask & IB_QP_SQ_PSN) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
                qp->qplib_qp.sq.psn = qp_attr->sq_psn;
        }
        if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
                qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
        }
        if (qp_attr_mask & IB_QP_CAP) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
                                CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
                                CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
                                CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
                                CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
                if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
                    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
                    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
                    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
                    (qp_attr->cap.max_inline_data >=
                                                dev_attr->max_inline_data)) {
                        dev_err(rdev_to_dev(rdev),
                                "Create QP failed - max exceeded");
                        return -EINVAL;
                }
                entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
                qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);
                qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
                if (qp->qplib_qp.rq.max_wqe) {
                        entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
                        qp->qplib_qp.rq.max_wqe =
                                min_t(u32, entries, dev_attr->max_qp_wqes + 1);
                        qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
                } else {
                        /* SRQ was used prior, just ignore the RQ caps */
                }
        }
        if (qp_attr_mask & IB_QP_DEST_QPN) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
                qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
        }
        rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
                return rc;
        }
        if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
                rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
        return rc;
}

int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        struct bnxt_qplib_qp qplib_qp;
        int rc;

        memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
        qplib_qp.id = qp->qplib_qp.id;
        qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

        rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
                return rc;
        }
        qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
        qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
        qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
        qp_attr->pkey_index = qplib_qp.pkey_index;
        qp_attr->qkey = qplib_qp.qkey;
        rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
                        qplib_qp.ah.host_sgid_index,
                        qplib_qp.ah.hop_limit,
                        qplib_qp.ah.traffic_class);
        rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
        rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
        ether_addr_copy(qp_attr->ah_attr.dmac, qplib_qp.ah.dmac);
        qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
        qp_attr->timeout = qplib_qp.timeout;
        qp_attr->retry_cnt = qplib_qp.retry_cnt;
        qp_attr->rnr_retry = qplib_qp.rnr_retry;
        qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
        qp_attr->rq_psn = qplib_qp.rq.psn;
        qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
        qp_attr->sq_psn = qplib_qp.sq.psn;
        qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
        qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
                                                        IB_SIGNAL_REQ_WR;
        qp_attr->dest_qp_num = qplib_qp.dest_qpn;

        qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
        qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
        qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
        qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
        qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
        qp_init_attr->cap = qp_attr->cap;

        return 0;
}

/* Routine for sending QP1 packets for RoCE V1 and V2 */
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
                                     struct ib_send_wr *wr,
                                     struct bnxt_qplib_swqe *wqe,
                                     int payload_size)
{
        struct ib_device *ibdev = &qp->rdev->ibdev;
        struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
                                             ib_ah);
        struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
        struct bnxt_qplib_sge sge;
        union ib_gid sgid;
        u8 nw_type;
        u16 ether_type;
        struct ib_gid_attr sgid_attr;
        union ib_gid dgid;
        bool is_eth = false;
        bool is_vlan = false;
        bool is_grh = false;
        bool is_udp = false;
        u8 ip_version = 0;
        u16 vlan_id = 0xFFFF;
        void *buf;
        int i, rc = 0, size;

        memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));

        rc = ib_get_cached_gid(ibdev, 1,
                               qplib_ah->host_sgid_index, &sgid,
                               &sgid_attr);
        if (rc) {
                dev_err(rdev_to_dev(qp->rdev),
                        "Failed to query gid at index %d",
                        qplib_ah->host_sgid_index);
                return rc;
        }
        if (sgid_attr.ndev) {
                if (is_vlan_dev(sgid_attr.ndev))
                        vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
                dev_put(sgid_attr.ndev);
        }
        /* Get network header type for this GID */
        nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
        switch (nw_type) {
        case RDMA_NETWORK_IPV4:
                nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
                break;
        case RDMA_NETWORK_IPV6:
                nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
                break;
        default:
                nw_type = BNXT_RE_ROCE_V1_PACKET;
                break;
        }
        memcpy(&dgid.raw, &qplib_ah->dgid, 16);
        is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
        if (is_udp) {
                if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
                        ip_version = 4;
                        ether_type = ETH_P_IP;
                } else {
                        ip_version = 6;
                        ether_type = ETH_P_IPV6;
                }
                is_grh = false;
        } else {
                ether_type = ETH_P_IBOE;
                is_grh = true;
        }

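        /*
         * QP1 packets are always Ethernet framed. vlan_id stays at the
         * 0xFFFF sentinel when the egress netdev carries no VLAN, so the
         * range check below (< 0x1000) also filters out the "no VLAN"
         * case.
         */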
        is_eth = true;
        is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;

        ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
                          ip_version, is_udp, 0, &qp->qp1_hdr);

        /* ETH */
        ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
        ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);

        /* For vlan, check the sgid for vlan existence */

        if (!is_vlan) {
                qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
        } else {
                qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
                qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
        }

        if (is_grh || (ip_version == 6)) {
                memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
                memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
                       sizeof(sgid));
                qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
        }

        if (ip_version == 4) {
                qp->qp1_hdr.ip4.tos = 0;
                qp->qp1_hdr.ip4.id = 0;
                qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
                qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;

                memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
                memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
                qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
        }

        if (is_udp) {
                qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
                qp->qp1_hdr.udp.sport = htons(0x8CD1);
                qp->qp1_hdr.udp.csum = 0;
        }

        /* BTH */
        if (wr->opcode == IB_WR_SEND_WITH_IMM) {
                qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                qp->qp1_hdr.immediate_present = 1;
        } else {
                qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
        }
        if (wr->send_flags & IB_SEND_SOLICITED)
                qp->qp1_hdr.bth.solicited_event = 1;
        /* pad_count: pad the payload up to a 4-byte boundary */
        qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;

        /* P_key for QP1 is for all members */
        qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
        qp->qp1_hdr.bth.destination_qpn = IB_QP1;
        qp->qp1_hdr.bth.ack_req = 0;
        qp->send_psn++;
        qp->send_psn &= BTH_PSN_MASK;
        qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
        /* DETH */
        /* Use the privileged Q_Key for QP1 */
        qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
        qp->qp1_hdr.deth.source_qpn = IB_QP1;

        /* Pack the QP1 header into the transmit buffer */
        buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
        if (buf) {
                size = ib_ud_header_pack(&qp->qp1_hdr, buf);
                for (i = wqe->num_sge; i; i--) {
                        wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
                        wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
                        wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
                }

                /*
                 * Max Header buf size for IPV6 RoCE V2 is 86,
                 * which is same as the QP1 SQ header buffer.
                 * Header buf size for IPV4 RoCE V2 can be 66.
                 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
                 * Subtract 20 bytes from QP1 SQ header buf size
                 */
                if (is_udp && ip_version == 4)
                        sge.size -= 20;
                /*
                 * Max Header buf size for RoCE V1 is 78.
                 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
                 * Subtract 8 bytes from QP1 SQ header buf size
                 */
                if (!is_udp)
                        sge.size -= 8;

                /* Subtract 4 bytes for non vlan packets */
                if (!is_vlan)
                        sge.size -= 4;

                wqe->sg_list[0].addr = sge.addr;
                wqe->sg_list[0].lkey = sge.lkey;
                wqe->sg_list[0].size = sge.size;
                wqe->num_sge++;

        } else {
                dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
                rc = -ENOMEM;
        }
        return rc;
}

1637/* For the MAD layer, it only provides the recv SGE the size of
1638 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
1639 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
1640 * receive packet (334 bytes) with no VLAN and then copy the GRH
1641 * and the MAD datagram out to the provided SGE.
1642 */
1643static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1644 struct ib_recv_wr *wr,
1645 struct bnxt_qplib_swqe *wqe,
1646 int payload_size)
1647{
1648 struct bnxt_qplib_sge ref, sge;
1649 u32 rq_prod_index;
1650 struct bnxt_re_sqp_entries *sqp_entry;
1651
1652 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1653
1654 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1655 return -ENOMEM;
1656
1657 /* Create 1 SGE to receive the entire
1658 * ethernet packet
1659 */
1660 /* Save the reference from ULP */
1661 ref.addr = wqe->sg_list[0].addr;
1662 ref.lkey = wqe->sg_list[0].lkey;
1663 ref.size = wqe->sg_list[0].size;
1664
1665 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1666
1667 /* SGE 1 */
1668 wqe->sg_list[0].addr = sge.addr;
1669 wqe->sg_list[0].lkey = sge.lkey;
1670 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1671 sge.size -= wqe->sg_list[0].size;
1672
1673 sqp_entry->sge.addr = ref.addr;
1674 sqp_entry->sge.lkey = ref.lkey;
1675 sqp_entry->sge.size = ref.size;
1676 /* Store the wrid for reporting completion */
1677 sqp_entry->wrid = wqe->wr_id;
1678 /* change the wqe->wrid to table index */
1679 wqe->wr_id = rq_prod_index;
1680 return 0;
1681}
1682
1683static int is_ud_qp(struct bnxt_re_qp *qp)
1684{
1685 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1686}
1687
1688static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1689 struct ib_send_wr *wr,
1690 struct bnxt_qplib_swqe *wqe)
1691{
1692 struct bnxt_re_ah *ah = NULL;
1693
1694 if (is_ud_qp(qp)) {
1695 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1696 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1697 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1698 wqe->send.avid = ah->qplib_ah.id;
1699 }
1700 switch (wr->opcode) {
1701 case IB_WR_SEND:
1702 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1703 break;
1704 case IB_WR_SEND_WITH_IMM:
1705 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1706 wqe->send.imm_data = wr->ex.imm_data;
1707 break;
1708 case IB_WR_SEND_WITH_INV:
1709 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1710 wqe->send.inv_key = wr->ex.invalidate_rkey;
1711 break;
1712 default:
1713 return -EINVAL;
1714 }
1715 if (wr->send_flags & IB_SEND_SIGNALED)
1716 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1717 if (wr->send_flags & IB_SEND_FENCE)
1718 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1719 if (wr->send_flags & IB_SEND_SOLICITED)
1720 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1721 if (wr->send_flags & IB_SEND_INLINE)
1722 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1723
1724 return 0;
1725}
1726
1727static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1728 struct bnxt_qplib_swqe *wqe)
1729{
1730 switch (wr->opcode) {
1731 case IB_WR_RDMA_WRITE:
1732 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1733 break;
1734 case IB_WR_RDMA_WRITE_WITH_IMM:
1735 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1736 wqe->rdma.imm_data = wr->ex.imm_data;
1737 break;
1738 case IB_WR_RDMA_READ:
1739 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1740 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1741 break;
1742 default:
1743 return -EINVAL;
1744 }
1745 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1746 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1747 if (wr->send_flags & IB_SEND_SIGNALED)
1748 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1749 if (wr->send_flags & IB_SEND_FENCE)
1750 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1751 if (wr->send_flags & IB_SEND_SOLICITED)
1752 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1753 if (wr->send_flags & IB_SEND_INLINE)
1754 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1755
1756 return 0;
1757}
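
/*
 * Illustrative sketch: the RDMA WRITE request shape consumed by
 * bnxt_re_build_rdma_wqe() above. "peer_va" and "peer_rkey" come from
 * the remote side's advertisement; "sge" is as in the SEND example.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.wr_id      = cookie,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.remote_addr = peer_va,
 *		.rkey        = peer_rkey,
 *	};
 *	rc = ib_post_send(qp, &wr.wr, &bad_wr);
 */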
1758
1759static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1760 struct bnxt_qplib_swqe *wqe)
1761{
1762 switch (wr->opcode) {
1763 case IB_WR_ATOMIC_CMP_AND_SWP:
1764 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1765 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1766 break;
1767 case IB_WR_ATOMIC_FETCH_AND_ADD:
1768 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1769 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1770 break;
1771 default:
1772 return -EINVAL;
1773 }
1774 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1775 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1776 if (wr->send_flags & IB_SEND_SIGNALED)
1777 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1778 if (wr->send_flags & IB_SEND_FENCE)
1779 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1780 if (wr->send_flags & IB_SEND_SOLICITED)
1781 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1782 return 0;
1783}
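
/*
 * Illustrative sketch: a compare-and-swap as mapped by
 * bnxt_re_build_atomic_wqe() above. The remote address must be 8-byte
 * aligned and the single local SGE must cover the 8 bytes in which the
 * prior value is returned (placeholder names).
 *
 *	struct ib_atomic_wr awr = {
 *		.wr = {
 *			.wr_id      = cookie,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *			.opcode     = IB_WR_ATOMIC_CMP_AND_SWP,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.remote_addr = peer_va,
 *		.compare_add = expected_val,
 *		.swap        = new_val,
 *		.rkey        = peer_rkey,
 *	};
 *	rc = ib_post_send(qp, &awr.wr, &bad_wr);
 */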
1784
1785static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1786 struct bnxt_qplib_swqe *wqe)
1787{
1788 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1789 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1790
1791 if (wr->send_flags & IB_SEND_SIGNALED)
1792 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1793 if (wr->send_flags & IB_SEND_FENCE)
1794 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1795 if (wr->send_flags & IB_SEND_SOLICITED)
1796 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1797
1798 return 0;
1799}
1800
1801static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1802 struct bnxt_qplib_swqe *wqe)
1803{
1804 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1805 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1806 int access = wr->access;
1807
1808 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1809 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1810 wqe->frmr.page_list = mr->pages;
1811 wqe->frmr.page_list_len = mr->npages;
1812 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1813 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1814
1815 if (wr->wr.send_flags & IB_SEND_FENCE)
1816 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1817 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1818 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1819
1820 if (access & IB_ACCESS_LOCAL_WRITE)
1821 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1822 if (access & IB_ACCESS_REMOTE_READ)
1823 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1824 if (access & IB_ACCESS_REMOTE_WRITE)
1825 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1826 if (access & IB_ACCESS_REMOTE_ATOMIC)
1827 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1828 if (access & IB_ACCESS_MW_BIND)
1829 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1830
1831 wqe->frmr.l_key = wr->key;
1832 wqe->frmr.length = wr->mr->length;
1833 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1834 wqe->frmr.va = wr->mr->iova;
1835 return 0;
1836}
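
/*
 * Illustrative sketch of the fast-registration flow that produces the
 * IB_WR_REG_MR request handled above ("pd", "qp", "sgl" and "nents" are
 * placeholders):
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	int n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	struct ib_reg_wr rwr = {
 *		.wr = {
 *			.opcode     = IB_WR_REG_MR,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.mr     = mr,
 *		.key    = mr->rkey,
 *		.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *	};
 *	rc = ib_post_send(qp, &rwr.wr, &bad_wr);
 */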
1837
1838static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1839 struct ib_send_wr *wr,
1840 struct bnxt_qplib_swqe *wqe)
1841{
1842 /* Copy the inline data to the data field */
1843 u8 *in_data;
1844 u32 i, sge_len;
1845 void *sge_addr;
1846
1847 in_data = wqe->inline_data;
1848 for (i = 0; i < wr->num_sge; i++) {
1849 sge_addr = (void *)(unsigned long)
1850 wr->sg_list[i].addr;
1851 sge_len = wr->sg_list[i].length;
1852
1853 if ((sge_len + wqe->inline_len) >
1854 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1855 dev_err(rdev_to_dev(rdev),
1856 "Inline data size requested > supported value");
1857 return -EINVAL;
1858 }
1859 sge_len = wr->sg_list[i].length;
1860
1861 memcpy(in_data, sge_addr, sge_len);
1862 in_data += wr->sg_list[i].length;
1863 wqe->inline_len += wr->sg_list[i].length;
1864 }
1865 return wqe->inline_len;
1866}
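
/*
 * Note the memcpy above: with IB_SEND_INLINE the SGE addresses on this
 * kernel path are read as CPU-addressable virtual addresses, not DMA
 * addresses, and the total length must fit within
 * BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH. A minimal sketch (placeholder
 * names):
 *
 *	sge.addr      = (u64)(unsigned long)kbuf;
 *	sge.length    = len;
 *	wr.send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED;
 */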
1867
1868static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
1869 struct ib_send_wr *wr,
1870 struct bnxt_qplib_swqe *wqe)
1871{
1872 int payload_sz = 0;
1873
1874 if (wr->send_flags & IB_SEND_INLINE)
1875 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
1876 else
1877 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
1878 wqe->num_sge);
1879
1880 return payload_sz;
1881}
1882
1883static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
1884 struct bnxt_re_qp *qp,
1885 struct ib_send_wr *wr)
1886{
1887 struct bnxt_qplib_swqe wqe;
1888 int rc = 0, payload_sz = 0;
1889 unsigned long flags;
1890
1891 spin_lock_irqsave(&qp->sq_lock, flags);
1893 while (wr) {
1894 /* House keeping */
1895 memset(&wqe, 0, sizeof(wqe));
1896
1897 /* Common */
1898 wqe.num_sge = wr->num_sge;
1899 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
1900 dev_err(rdev_to_dev(rdev),
1901 "Limit exceeded for Send SGEs");
1902 rc = -EINVAL;
1903 goto bad;
1904 }
1905
1906 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
1907 if (payload_sz < 0) {
1908 rc = -EINVAL;
1909 goto bad;
1910 }
1911 wqe.wr_id = wr->wr_id;
1912
1913 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
1914
1915 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
1916 if (!rc)
1917 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
1918bad:
1919 if (rc) {
1920 dev_err(rdev_to_dev(rdev),
1921 "Post send failed opcode = %#x rc = %d",
1922 wr->opcode, rc);
1923 break;
1924 }
1925 wr = wr->next;
1926 }
1927 bnxt_qplib_post_send_db(&qp->qplib_qp);
1928 spin_unlock_irqrestore(&qp->sq_lock, flags);
1929 return rc;
1930}
1931
1932int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
1933 struct ib_send_wr **bad_wr)
1934{
1935 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1936 struct bnxt_qplib_swqe wqe;
1937 int rc = 0, payload_sz = 0;
1938 unsigned long flags;
1939
1940 spin_lock_irqsave(&qp->sq_lock, flags);
1941 while (wr) {
1942 /* House keeping */
1943 memset(&wqe, 0, sizeof(wqe));
1944
1945 /* Common */
1946 wqe.num_sge = wr->num_sge;
1947 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
1948 dev_err(rdev_to_dev(qp->rdev),
1949 "Limit exceeded for Send SGEs");
1950 rc = -EINVAL;
1951 goto bad;
1952 }
1953
1954 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
1955 if (payload_sz < 0) {
1956 rc = -EINVAL;
1957 goto bad;
1958 }
1959 wqe.wr_id = wr->wr_id;
1960
1961 switch (wr->opcode) {
1962 case IB_WR_SEND:
1963 case IB_WR_SEND_WITH_IMM:
1964 if (ib_qp->qp_type == IB_QPT_GSI) {
1965 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
1966 payload_sz);
1967 if (rc)
1968 goto bad;
1969 wqe.rawqp1.lflags |=
1970 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
1971 }
1972 if (wr->send_flags & IB_SEND_IP_CSUM)
1973 wqe.rawqp1.lflags |=
1974 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
1980 /* Fall thru to build the wqe */
1981 case IB_WR_SEND_WITH_INV:
1982 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
1983 break;
1984 case IB_WR_RDMA_WRITE:
1985 case IB_WR_RDMA_WRITE_WITH_IMM:
1986 case IB_WR_RDMA_READ:
1987 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
1988 break;
1989 case IB_WR_ATOMIC_CMP_AND_SWP:
1990 case IB_WR_ATOMIC_FETCH_AND_ADD:
1991 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
1992 break;
1993 case IB_WR_RDMA_READ_WITH_INV:
1994 dev_err(rdev_to_dev(qp->rdev),
1995 "RDMA Read with Invalidate is not supported");
1996 rc = -EINVAL;
1997 goto bad;
1998 case IB_WR_LOCAL_INV:
1999 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2000 break;
2001 case IB_WR_REG_MR:
2002 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2003 break;
2004 default:
2005 /* Unsupported WRs */
2006 dev_err(rdev_to_dev(qp->rdev),
2007 "WR (%#x) is not supported", wr->opcode);
2008 rc = -EINVAL;
2009 goto bad;
2010 }
2011 if (!rc)
2012 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2013bad:
2014 if (rc) {
2015 dev_err(rdev_to_dev(qp->rdev),
2016 "post_send failed op:%#x qps = %#x rc = %d\n",
2017 wr->opcode, qp->qplib_qp.state, rc);
2018 *bad_wr = wr;
2019 break;
2020 }
2021 wr = wr->next;
2022 }
2023 bnxt_qplib_post_send_db(&qp->qplib_qp);
2024 spin_unlock_irqrestore(&qp->sq_lock, flags);
2025
2026 return rc;
2027}
2028
2029static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2030 struct bnxt_re_qp *qp,
2031 struct ib_recv_wr *wr)
2032{
2033 struct bnxt_qplib_swqe wqe;
2034 int rc = 0, payload_sz = 0;
2035
2037 while (wr) {
2038 /* House keeping */
2039 memset(&wqe, 0, sizeof(wqe));
2040
2041 /* Common */
2042 wqe.num_sge = wr->num_sge;
2043 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2044 dev_err(rdev_to_dev(rdev),
2045 "Limit exceeded for Receive SGEs");
2046 rc = -EINVAL;
2047 break;
2048 }
2049 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2050 wr->num_sge);
2051 wqe.wr_id = wr->wr_id;
2052 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2053
2054 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2055 if (rc)
2056 break;
2057
2058 wr = wr->next;
2059 }
2060 if (!rc)
2061 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2062 return rc;
2063}
2064
2065int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2066 struct ib_recv_wr **bad_wr)
2067{
2068 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2069 struct bnxt_qplib_swqe wqe;
2070 int rc = 0, payload_sz = 0;
2071
2072 while (wr) {
2073 /* House keeping */
2074 memset(&wqe, 0, sizeof(wqe));
2075
2076 /* Common */
2077 wqe.num_sge = wr->num_sge;
2078 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2079 dev_err(rdev_to_dev(qp->rdev),
2080 "Limit exceeded for Receive SGEs");
2081 rc = -EINVAL;
2082 *bad_wr = wr;
2083 break;
2084 }
2085
2086 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2087 wr->num_sge);
2088 wqe.wr_id = wr->wr_id;
2089 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2090
2091 if (ib_qp->qp_type == IB_QPT_GSI)
2092 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2093 payload_sz);
2094 if (!rc)
2095 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2096 if (rc) {
2097 *bad_wr = wr;
2098 break;
2099 }
2100 wr = wr->next;
2101 }
2102 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2103 return rc;
2104}
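
/*
 * Illustrative sketch: replenishing the receive queue through this
 * entry point (placeholder names):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr rwr = {
 *		.wr_id   = cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_rwr;
 *	rc = ib_post_recv(qp, &rwr, &bad_rwr);
 */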
2105
2106/* Completion Queues */
2107int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2108{
2109 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2110 struct bnxt_re_dev *rdev = cq->rdev;
2111 int rc;
2112
2113 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2114 if (rc) {
2115 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2116 return rc;
2117 }
2118 if (!IS_ERR_OR_NULL(cq->umem))
2119 ib_umem_release(cq->umem);
2120
2121 kfree(cq->cql);
2122 kfree(cq);
2125 atomic_dec(&rdev->cq_count);
2126 rdev->nq.budget--;
2127 return 0;
2128}
2129
2130struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2131 const struct ib_cq_init_attr *attr,
2132 struct ib_ucontext *context,
2133 struct ib_udata *udata)
2134{
2135 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2136 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2137 struct bnxt_re_cq *cq = NULL;
2138 int rc, entries;
2139 int cqe = attr->cqe;
2140
2141 /* Validate CQ fields */
2142 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2143 dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2144 return ERR_PTR(-EINVAL);
2145 }
2146 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2147 if (!cq)
2148 return ERR_PTR(-ENOMEM);
2149
2150 cq->rdev = rdev;
2151 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2152
2153 entries = roundup_pow_of_two(cqe + 1);
2154 if (entries > dev_attr->max_cq_wqes + 1)
2155 entries = dev_attr->max_cq_wqes + 1;
2156
2157 if (context) {
2158 struct bnxt_re_cq_req req;
2159 struct bnxt_re_ucontext *uctx = container_of
2160 (context,
2161 struct bnxt_re_ucontext,
2162 ib_uctx);
2163 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2164 rc = -EFAULT;
2165 goto fail;
2166 }
2167
2168 cq->umem = ib_umem_get(context, req.cq_va,
2169 entries * sizeof(struct cq_base),
2170 IB_ACCESS_LOCAL_WRITE, 1);
2171 if (IS_ERR(cq->umem)) {
2172 rc = PTR_ERR(cq->umem);
2173 goto fail;
2174 }
2175 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2176 cq->qplib_cq.nmap = cq->umem->nmap;
2177 cq->qplib_cq.dpi = uctx->dpi;
2178 } else {
2179 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2180 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2181 GFP_KERNEL);
2182 if (!cq->cql) {
2183 rc = -ENOMEM;
2184 goto fail;
2185 }
2186
2187 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2188 cq->qplib_cq.sghead = NULL;
2189 cq->qplib_cq.nmap = 0;
2190 }
2191 cq->qplib_cq.max_wqe = entries;
2192 cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
2193
2194 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2195 if (rc) {
2196 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2197 goto fail;
2198 }
2199
2200 cq->ib_cq.cqe = entries;
2201 cq->cq_period = cq->qplib_cq.period;
2202 rdev->nq.budget++;
2203
2204 atomic_inc(&rdev->cq_count);
2205
2206 if (context) {
2207 struct bnxt_re_cq_resp resp;
2208
2209 resp.cqid = cq->qplib_cq.id;
2210 resp.tail = cq->qplib_cq.hwq.cons;
2211 resp.phase = cq->qplib_cq.period;
2212 resp.rsvd = 0;
2213 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2214 if (rc) {
2215 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2216 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2217 goto c2fail;
2218 }
2219 }
2220
2221 return &cq->ib_cq;
2222
2223c2fail:
2224 if (context)
2225 ib_umem_release(cq->umem);
2226fail:
2227 kfree(cq->cql);
2228 kfree(cq);
2229 return ERR_PTR(rc);
2230}
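
/*
 * Illustrative sketch: a kernel consumer creating a CQ on this device
 * takes the !context branch above ("my_comp_handler" etc. are
 * placeholders):
 *
 *	struct ib_cq_init_attr attr = { .cqe = 1024, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(ibdev, my_comp_handler,
 *					my_event_handler, my_ctx, &attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */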
2231
2232static u8 __req_to_ib_wc_status(u8 qstatus)
2233{
2234 switch (qstatus) {
2235 case CQ_REQ_STATUS_OK:
2236 return IB_WC_SUCCESS;
2237 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2238 return IB_WC_BAD_RESP_ERR;
2239 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2240 return IB_WC_LOC_LEN_ERR;
2241 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2242 return IB_WC_LOC_QP_OP_ERR;
2243 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2244 return IB_WC_LOC_PROT_ERR;
2245 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2246 return IB_WC_GENERAL_ERR;
2247 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2248 return IB_WC_REM_INV_REQ_ERR;
2249 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2250 return IB_WC_REM_ACCESS_ERR;
2251 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2252 return IB_WC_REM_OP_ERR;
2253 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2254 return IB_WC_RNR_RETRY_EXC_ERR;
2255 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2256 return IB_WC_RETRY_EXC_ERR;
2257 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2258 return IB_WC_WR_FLUSH_ERR;
2259 default:
2260 return IB_WC_GENERAL_ERR;
2261 }
2263}
2264
2265static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2266{
2267 switch (qstatus) {
2268 case CQ_RES_RAWETH_QP1_STATUS_OK:
2269 return IB_WC_SUCCESS;
2270 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2271 return IB_WC_LOC_ACCESS_ERR;
2272 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2273 return IB_WC_LOC_LEN_ERR;
2274 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2275 return IB_WC_LOC_PROT_ERR;
2276 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2277 return IB_WC_LOC_QP_OP_ERR;
2278 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2279 return IB_WC_GENERAL_ERR;
2280 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2281 return IB_WC_WR_FLUSH_ERR;
2282 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2283 return IB_WC_WR_FLUSH_ERR;
2284 default:
2285 return IB_WC_GENERAL_ERR;
2286 }
2287}
2288
2289static u8 __rc_to_ib_wc_status(u8 qstatus)
2290{
2291 switch (qstatus) {
2292 case CQ_RES_RC_STATUS_OK:
2293 return IB_WC_SUCCESS;
2294 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2295 return IB_WC_LOC_ACCESS_ERR;
2296 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2297 return IB_WC_LOC_LEN_ERR;
2298 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2299 return IB_WC_LOC_PROT_ERR;
2300 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2301 return IB_WC_LOC_QP_OP_ERR;
2302 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2303 return IB_WC_GENERAL_ERR;
2304 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2305 return IB_WC_REM_INV_REQ_ERR;
2306 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2307 return IB_WC_WR_FLUSH_ERR;
2308 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2309 return IB_WC_WR_FLUSH_ERR;
2310 default:
2311 return IB_WC_GENERAL_ERR;
2312 }
2313}
2314
2315static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2316{
2317 switch (cqe->type) {
2318 case BNXT_QPLIB_SWQE_TYPE_SEND:
2319 wc->opcode = IB_WC_SEND;
2320 break;
2321 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2322 wc->opcode = IB_WC_SEND;
2323 wc->wc_flags |= IB_WC_WITH_IMM;
2324 break;
2325 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2326 wc->opcode = IB_WC_SEND;
2327 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2328 break;
2329 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2330 wc->opcode = IB_WC_RDMA_WRITE;
2331 break;
2332 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2333 wc->opcode = IB_WC_RDMA_WRITE;
2334 wc->wc_flags |= IB_WC_WITH_IMM;
2335 break;
2336 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2337 wc->opcode = IB_WC_RDMA_READ;
2338 break;
2339 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2340 wc->opcode = IB_WC_COMP_SWAP;
2341 break;
2342 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2343 wc->opcode = IB_WC_FETCH_ADD;
2344 break;
2345 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2346 wc->opcode = IB_WC_LOCAL_INV;
2347 break;
2348 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2349 wc->opcode = IB_WC_REG_MR;
2350 break;
2351 default:
2352 wc->opcode = IB_WC_SEND;
2353 break;
2354 }
2355
2356 wc->status = __req_to_ib_wc_status(cqe->status);
2357}
2358
2359static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2360 u16 raweth_qp1_flags2)
2361{
2362 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2363
2364 /* raweth_qp1_flags Bit 9-6 indicates itype */
2365 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2366 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2367 return -1;
2368
2369 if (raweth_qp1_flags2 &
2370 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2371 raweth_qp1_flags2 &
2372 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2373 is_udp = true;
2374 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0 - v4, 1 - v6 */
2375 is_ipv6 = !!(raweth_qp1_flags2 &
2376 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE);
2377 is_ipv4 = !is_ipv6;
2378 return ((is_ipv6) ?
2379 BNXT_RE_ROCEV2_IPV6_PACKET :
2380 BNXT_RE_ROCEV2_IPV4_PACKET);
2381 } else {
2382 return BNXT_RE_ROCE_V1_PACKET;
2383 }
2384}
2385
2386static int bnxt_re_to_ib_nw_type(int nw_type)
2387{
2388 u8 nw_hdr_type = 0xFF;
2389
2390 switch (nw_type) {
2391 case BNXT_RE_ROCE_V1_PACKET:
2392 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2393 break;
2394 case BNXT_RE_ROCEV2_IPV4_PACKET:
2395 nw_hdr_type = RDMA_NETWORK_IPV4;
2396 break;
2397 case BNXT_RE_ROCEV2_IPV6_PACKET:
2398 nw_hdr_type = RDMA_NETWORK_IPV6;
2399 break;
2400 }
2401 return nw_hdr_type;
2402}
2403
2404static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2405 void *rq_hdr_buf)
2406{
2407 u8 *tmp_buf = NULL;
2408 struct ethhdr *eth_hdr;
2409 u16 eth_type;
2410 bool rc = false;
2411
2412 tmp_buf = (u8 *)rq_hdr_buf;
2413 /*
2414 * If dest mac is not same as I/F mac, this could be a
2415 * loopback address or multicast address, check whether
2416 * it is a loopback packet
2417 */
2418 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2419 tmp_buf += 4;
2420 /* Check the ether type */
2421 eth_hdr = (struct ethhdr *)tmp_buf;
2422 eth_type = ntohs(eth_hdr->h_proto);
2423 switch (eth_type) {
2424 case ETH_P_IBOE:
2425 rc = true;
2426 break;
2427 case ETH_P_IP:
2428 case ETH_P_IPV6: {
2429 u32 len;
2430 struct udphdr *udp_hdr;
2431
2432 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2433 sizeof(struct ipv6hdr));
2434 tmp_buf += sizeof(struct ethhdr) + len;
2435 udp_hdr = (struct udphdr *)tmp_buf;
2436 if (ntohs(udp_hdr->dest) ==
2437 ROCE_V2_UDP_DPORT)
2438 rc = true;
2439 break;
2440 }
2441 default:
2442 break;
2443 }
2444 }
2445
2446 return rc;
2447}
2448
2449static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2450 struct bnxt_qplib_cqe *cqe)
2451{
2452 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2453 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2454 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2455 struct ib_send_wr *swr;
2456 struct ib_ud_wr udwr;
2457 struct ib_recv_wr rwr;
2458 int pkt_type = 0;
2459 u32 tbl_idx;
2460 void *rq_hdr_buf;
2461 dma_addr_t rq_hdr_buf_map;
2462 dma_addr_t shrq_hdr_buf_map;
2463 u32 offset = 0;
2464 u32 skip_bytes = 0;
2465 struct ib_sge s_sge[2];
2466 struct ib_sge r_sge[2];
2467 int rc;
2468
2469 memset(&udwr, 0, sizeof(udwr));
2470 memset(&rwr, 0, sizeof(rwr));
2471 memset(&s_sge, 0, sizeof(s_sge));
2472 memset(&r_sge, 0, sizeof(r_sge));
2473
2474 swr = &udwr.wr;
2475 tbl_idx = cqe->wr_id;
2476
2477 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2478 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2479 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2480 tbl_idx);
2481
2482 /* Shadow QP header buffer */
2483 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2484 tbl_idx);
2485 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2486
2487 /* Store this cqe */
2488 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2489 sqp_entry->qp1_qp = qp1_qp;
2490
2491 /* Find packet type from the cqe */
2492
2493 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2494 cqe->raweth_qp1_flags2);
2495 if (pkt_type < 0) {
2496 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2497 return -EINVAL;
2498 }
2499
2500 /* Adjust the offset for the user buffer and post in the rq */
2501
2502 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2503 offset = 20;
2504
2505 /*
2506 * QP1 loopback packet has 4 bytes of internal header before
2507 * ether header. Skip these four bytes.
2508 */
2509 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2510 skip_bytes = 4;
2511
2512 /* First send SGE. Skip the ether header */
2513 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2514 + skip_bytes;
2515 s_sge[0].lkey = 0xFFFFFFFF;
2516 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2517 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2518
2519 /* Second Send SGE */
2520 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2521 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2522 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2523 s_sge[1].addr += 8;
2524 s_sge[1].lkey = 0xFFFFFFFF;
2525 s_sge[1].length = 256;
2526
2527 /* First recv SGE */
2528
2529 r_sge[0].addr = shrq_hdr_buf_map;
2530 r_sge[0].lkey = 0xFFFFFFFF;
2531 r_sge[0].length = 40;
2532
2533 r_sge[1].addr = sqp_entry->sge.addr + offset;
2534 r_sge[1].lkey = sqp_entry->sge.lkey;
2535 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2536
2537 /* Create receive work request */
2538 rwr.num_sge = 2;
2539 rwr.sg_list = r_sge;
2540 rwr.wr_id = tbl_idx;
2541 rwr.next = NULL;
2542
2543 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2544 if (rc) {
2545 dev_err(rdev_to_dev(rdev),
2546 "Failed to post Rx buffers to shadow QP");
2547 return -ENOMEM;
2548 }
2549
2550 swr->num_sge = 2;
2551 swr->sg_list = s_sge;
2552 swr->wr_id = tbl_idx;
2553 swr->opcode = IB_WR_SEND;
2554 swr->next = NULL;
2555
2556 udwr.ah = &rdev->sqp_ah->ib_ah;
2557 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2558 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2559
2560 /* post data received in the send queue */
2561 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2562
2563 return rc;
2564}
2565
2566static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2567 struct bnxt_qplib_cqe *cqe)
2568{
2569 wc->opcode = IB_WC_RECV;
2570 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2571 wc->wc_flags |= IB_WC_GRH;
2572}
2573
2574static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2575 struct bnxt_qplib_cqe *cqe)
2576{
2577 wc->opcode = IB_WC_RECV;
2578 wc->status = __rc_to_ib_wc_status(cqe->status);
2579
2580 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2581 wc->wc_flags |= IB_WC_WITH_IMM;
2582 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2583 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2584 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2585 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2586 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2587}
2588
2589static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2590 struct ib_wc *wc,
2591 struct bnxt_qplib_cqe *cqe)
2592{
2593 u32 tbl_idx;
2594 struct bnxt_re_dev *rdev = qp->rdev;
2595 struct bnxt_re_qp *qp1_qp = NULL;
2596 struct bnxt_qplib_cqe *orig_cqe = NULL;
2597 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2598 int nw_type;
2599
2600 tbl_idx = cqe->wr_id;
2601
2602 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2603 qp1_qp = sqp_entry->qp1_qp;
2604 orig_cqe = &sqp_entry->cqe;
2605
2606 wc->wr_id = sqp_entry->wrid;
2607 wc->byte_len = orig_cqe->length;
2608 wc->qp = &qp1_qp->ib_qp;
2609
2610 wc->ex.imm_data = orig_cqe->immdata;
2611 wc->src_qp = orig_cqe->src_qp;
2612 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2613 wc->port_num = 1;
2614 wc->vendor_err = orig_cqe->status;
2615
2616 wc->opcode = IB_WC_RECV;
2617 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2618 wc->wc_flags |= IB_WC_GRH;
2619
2620 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2621 orig_cqe->raweth_qp1_flags2);
2622 if (nw_type >= 0) {
2623 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2624 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2625 }
2626}
2627
2628static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2629 struct bnxt_qplib_cqe *cqe)
2630{
2631 wc->opcode = IB_WC_RECV;
2632 wc->status = __rc_to_ib_wc_status(cqe->status);
2633
2634 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2635 wc->wc_flags |= IB_WC_WITH_IMM;
2636 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2637 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2638 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2639 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2640 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2641}
2642
2643int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2644{
2645 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2646 struct bnxt_re_qp *qp;
2647 struct bnxt_qplib_cqe *cqe;
2648 int i, ncqe, budget;
2649 u32 tbl_idx;
2650 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2651 unsigned long flags;
2652
2653 spin_lock_irqsave(&cq->cq_lock, flags);
2654 budget = min_t(u32, num_entries, cq->max_cql);
2655 if (!cq->cql) {
2656 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2657 goto exit;
2658 }
2659 cqe = &cq->cql[0];
2660 while (budget) {
2661 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget);
2662 if (!ncqe)
2663 break;
2664
2665 for (i = 0; i < ncqe; i++, cqe++) {
2666 /* Transcribe each qplib_wqe back to ib_wc */
2667 memset(wc, 0, sizeof(*wc));
2668
2669 wc->wr_id = cqe->wr_id;
2670 wc->byte_len = cqe->length;
2671 qp = container_of
2672 ((struct bnxt_qplib_qp *)
2673 (unsigned long)(cqe->qp_handle),
2674 struct bnxt_re_qp, qplib_qp);
2675 if (!qp) {
2676 dev_err(rdev_to_dev(cq->rdev),
2677 "POLL CQ : bad QP handle");
2678 continue;
2679 }
2680 wc->qp = &qp->ib_qp;
2681 wc->ex.imm_data = cqe->immdata;
2682 wc->src_qp = cqe->src_qp;
2683 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2684 wc->port_num = 1;
2685 wc->vendor_err = cqe->status;
2686
2687 switch (cqe->opcode) {
2688 case CQ_BASE_CQE_TYPE_REQ:
2689 if (qp->qplib_qp.id ==
2690 qp->rdev->qp1_sqp->qplib_qp.id) {
2691 /* Handle this completion with
2692 * the stored completion
2693 */
2694 memset(wc, 0, sizeof(*wc));
2695 continue;
2696 }
2697 bnxt_re_process_req_wc(wc, cqe);
2698 break;
2699 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2700 if (!cqe->status) {
2701 int rc = 0;
2702
2703 rc = bnxt_re_process_raw_qp_pkt_rx
2704 (qp, cqe);
2705 if (!rc) {
2706 memset(wc, 0, sizeof(*wc));
2707 continue;
2708 }
2709 cqe->status = -1;
2710 }
2711 /* Errors need not be looped back.
2712 * But change the wr_id to the one
2713 * stored in the table
2714 */
2715 tbl_idx = cqe->wr_id;
2716 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2717 wc->wr_id = sqp_entry->wrid;
2718 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2719 break;
2720 case CQ_BASE_CQE_TYPE_RES_RC:
2721 bnxt_re_process_res_rc_wc(wc, cqe);
2722 break;
2723 case CQ_BASE_CQE_TYPE_RES_UD:
2724 if (qp->qplib_qp.id ==
2725 qp->rdev->qp1_sqp->qplib_qp.id) {
2726 /* Handle this completion with
2727 * the stored completion
2728 */
2729 if (cqe->status) {
2730 continue;
2731 } else {
2732 bnxt_re_process_res_shadow_qp_wc
2733 (qp, wc, cqe);
2734 break;
2735 }
2736 }
2737 bnxt_re_process_res_ud_wc(wc, cqe);
2738 break;
2739 default:
2740 dev_err(rdev_to_dev(cq->rdev),
2741 "POLL CQ : type 0x%x not handled",
2742 cqe->opcode);
2743 continue;
2744 }
2745 wc++;
2746 budget--;
2747 }
2748 }
2749exit:
2750 spin_unlock_irqrestore(&cq->cq_lock, flags);
2751 return num_entries - budget;
2752}
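
/*
 * Illustrative consumer sketch: draining this CQ in batches through the
 * core entry point ("handle_wc" is a hypothetical helper):
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */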
2753
2754int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2755 enum ib_cq_notify_flags ib_cqn_flags)
2756{
2757 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2758 int type = 0;
2759
2760 /* Trigger on the very next completion */
2761 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
2762 type = DBR_DBR_TYPE_CQ_ARMALL;
2763 /* Trigger on the next solicited completion */
2764 else if (ib_cqn_flags & IB_CQ_SOLICITED)
2765 type = DBR_DBR_TYPE_CQ_ARMSE;
2766
2767 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
2768
2769 return 0;
2770}
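
/*
 * Consumers normally pair arming with one more poll, since a completion
 * that slipped in just before the arm raises no event (sketch,
 * placeholder names):
 *
 *	ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(ibcq, 1, &wc) > 0)
 *		handle_wc(&wc);
 */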
2771
2772/* Memory Regions */
2773struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
2774{
2775 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2776 struct bnxt_re_dev *rdev = pd->rdev;
2777 struct bnxt_re_mr *mr;
2778 u64 pbl = 0;
2779 int rc;
2780
2781 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2782 if (!mr)
2783 return ERR_PTR(-ENOMEM);
2784
2785 mr->rdev = rdev;
2786 mr->qplib_mr.pd = &pd->qplib_pd;
2787 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
2788 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2789
2790 /* Allocate and register 0 as the address */
2791 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
2792 if (rc)
2793 goto fail;
2794
2795 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
2796 mr->qplib_mr.total_size = -1; /* Infinite length */
2797 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
2798 if (rc)
2799 goto fail_mr;
2800
2801 mr->ib_mr.lkey = mr->qplib_mr.lkey;
2802 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
2803 IB_ACCESS_REMOTE_ATOMIC))
2804 mr->ib_mr.rkey = mr->ib_mr.lkey;
2805 atomic_inc(&rdev->mr_count);
2806
2807 return &mr->ib_mr;
2808
2809fail_mr:
2810 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2811fail:
2812 kfree(mr);
2813 return ERR_PTR(rc);
2814}
2815
2816int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
2817{
2818 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
2819 struct bnxt_re_dev *rdev = mr->rdev;
2820 int rc;
2821
2822 if (mr->npages && mr->pages) {
2823 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
2824 &mr->qplib_frpl);
2825 kfree(mr->pages);
2826 mr->npages = 0;
2827 mr->pages = NULL;
2828 }
2829 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2830
2831 if (!IS_ERR_OR_NULL(mr->ib_umem))
2832 ib_umem_release(mr->ib_umem);
2833
2834 kfree(mr);
2835 atomic_dec(&rdev->mr_count);
2836 return rc;
2837}
2838
2839static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
2840{
2841 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
2842
2843 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
2844 return -ENOMEM;
2845
2846 mr->pages[mr->npages++] = addr;
2847 return 0;
2848}
2849
2850int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
2851 unsigned int *sg_offset)
2852{
2853 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
2854
2855 mr->npages = 0;
2856 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
2857}
2858
2859struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
2860 u32 max_num_sg)
2861{
2862 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2863 struct bnxt_re_dev *rdev = pd->rdev;
2864 struct bnxt_re_mr *mr = NULL;
2865 int rc;
2866
2867 if (type != IB_MR_TYPE_MEM_REG) {
2868 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
2869 return ERR_PTR(-EINVAL);
2870 }
2871 if (max_num_sg > MAX_PBL_LVL_1_PGS)
2872 return ERR_PTR(-EINVAL);
2873
2874 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2875 if (!mr)
2876 return ERR_PTR(-ENOMEM);
2877
2878 mr->rdev = rdev;
2879 mr->qplib_mr.pd = &pd->qplib_pd;
2880 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
2881 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2882
2883 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
2884 if (rc)
2885 goto fail;
2886
2887 mr->ib_mr.lkey = mr->qplib_mr.lkey;
2888 mr->ib_mr.rkey = mr->ib_mr.lkey;
2889
2890 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
2891 if (!mr->pages) {
2892 rc = -ENOMEM;
2893 goto fail;
2894 }
2895 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
2896 &mr->qplib_frpl, max_num_sg);
2897 if (rc) {
2898 dev_err(rdev_to_dev(rdev),
2899 "Failed to allocate HW FR page list");
2900 goto fail_mr;
2901 }
2902
2903 atomic_inc(&rdev->mr_count);
2904 return &mr->ib_mr;
2905
2906fail_mr:
2907 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2908fail:
2909 kfree(mr->pages);
2910 kfree(mr);
2911 return ERR_PTR(rc);
2912}
2913
2914/* Fast Memory Regions */
2915struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
2916 struct ib_fmr_attr *fmr_attr)
2917{
2918 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2919 struct bnxt_re_dev *rdev = pd->rdev;
2920 struct bnxt_re_fmr *fmr;
2921 int rc;
2922
2923 if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
2924 fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
2925 dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
2926 return ERR_PTR(-ENOMEM);
2927 }
2928 fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
2929 if (!fmr)
2930 return ERR_PTR(-ENOMEM);
2931
2932 fmr->rdev = rdev;
2933 fmr->qplib_fmr.pd = &pd->qplib_pd;
2934 fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2935
2936 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
2937 if (rc)
2938 goto fail;
2939
2940 fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
2941 fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
2942 fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
2943
2944 atomic_inc(&rdev->mr_count);
2945 return &fmr->ib_fmr;
2946fail:
2947 kfree(fmr);
2948 return ERR_PTR(rc);
2949}
2950
2951int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
2952 u64 iova)
2953{
2954 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
2955 ib_fmr);
2956 struct bnxt_re_dev *rdev = fmr->rdev;
2957 int rc;
2958
2959 fmr->qplib_fmr.va = iova;
2960 fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
2961
2962 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
2963 list_len, true);
2964 if (rc)
2965 dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
2966 fmr->ib_fmr.lkey);
2967 return rc;
2968}
2969
2970int bnxt_re_unmap_fmr(struct list_head *fmr_list)
2971{
2972 struct bnxt_re_dev *rdev;
2973 struct bnxt_re_fmr *fmr;
2974 struct ib_fmr *ib_fmr;
2975 int rc = 0;
2976
2977 /* Validate each FMR inside the fmr_list */
2978 list_for_each_entry(ib_fmr, fmr_list, list) {
2979 fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
2980 rdev = fmr->rdev;
2981
2982 if (rdev) {
2983 rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
2984 &fmr->qplib_fmr, true);
2985 if (rc)
2986 break;
2987 }
2988 }
2989 return rc;
2990}
2991
2992int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
2993{
2994 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
2995 ib_fmr);
2996 struct bnxt_re_dev *rdev = fmr->rdev;
2997 int rc;
2998
2999 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
3000 if (rc)
3001 dev_err(rdev_to_dev(rdev), "Failed to free FMR");
3002
3003 kfree(fmr);
3004 atomic_dec(&rdev->mr_count);
3005 return rc;
3006}
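
/*
 * Illustrative sketch of the (legacy) FMR cycle these entry points
 * implement; "page_list" holds page-aligned DMA addresses and the names
 * are placeholders:
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, access_flags, &attr);
 *	rc = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	... I/O using fmr->lkey / fmr->rkey ...
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */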
3007
3008/* uverbs */
3009struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3010 u64 virt_addr, int mr_access_flags,
3011 struct ib_udata *udata)
3012{
3013 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3014 struct bnxt_re_dev *rdev = pd->rdev;
3015 struct bnxt_re_mr *mr;
3016 struct ib_umem *umem;
3017 u64 *pbl_tbl, *pbl_tbl_orig;
3018 int i, umem_pgs, pages, rc;
3019 struct scatterlist *sg;
3020 int entry;
3021
3022 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3023 if (!mr)
3024 return ERR_PTR(-ENOMEM);
3025
3026 mr->rdev = rdev;
3027 mr->qplib_mr.pd = &pd->qplib_pd;
3028 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3029 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3030
3031 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3032 mr_access_flags, 0);
3033 if (IS_ERR(umem)) {
3034 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3035 rc = -EFAULT;
3036 goto free_mr;
3037 }
3038 mr->ib_umem = umem;
3039
3040 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3041 if (rc) {
3042 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3043 goto release_umem;
3044 }
3045 /* The fixed portion of the rkey is the same as the lkey */
3046 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3047
3048 mr->qplib_mr.va = virt_addr;
3049 umem_pgs = ib_umem_page_count(umem);
3050 if (!umem_pgs) {
3051 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3052 rc = -EINVAL;
3053 goto free_mrw;
3054 }
3055 mr->qplib_mr.total_size = length;
3056
3057 pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3058 if (!pbl_tbl) {
3059 rc = -ENOMEM;
3060 goto free_mrw;
3061 }
3062 pbl_tbl_orig = pbl_tbl;
3063
3064 if (umem->hugetlb) {
3065 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3066 rc = -EFAULT;
3067 goto fail;
3068 }
3069
3070 if (umem->page_shift != PAGE_SHIFT) {
3071 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
3072 rc = -EFAULT;
3073 goto fail;
3074 }
3075 /* Map umem buf ptrs to the PBL */
3076 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3077 pages = sg_dma_len(sg) >> umem->page_shift;
3078 for (i = 0; i < pages; i++, pbl_tbl++)
3079 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
3080 }
3081 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3082 umem_pgs, false);
3083 if (rc) {
3084 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3085 goto fail;
3086 }
3087
3088 kfree(pbl_tbl_orig);
3089
3090 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3091 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3092 atomic_inc(&rdev->mr_count);
3093
3094 return &mr->ib_mr;
3095fail:
3096 kfree(pbl_tbl_orig);
3097free_mrw:
3098 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3099release_umem:
3100 ib_umem_release(umem);
3101free_mr:
3102 kfree(mr);
3103 return ERR_PTR(rc);
3104}
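
/*
 * Illustrative userspace counterpart (libibverbs): this kernel path runs
 * when an application registers memory, e.g.
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_READ);
 *
 * ib_umem_get() pins the pages and the loop above flattens them into
 * the PBL passed to firmware.
 */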
3105
3106struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3107 struct ib_udata *udata)
3108{
3109 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3110 struct bnxt_re_uctx_resp resp;
3111 struct bnxt_re_ucontext *uctx;
3112 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3113 int rc;
3114
3115 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3116 ibdev->uverbs_abi_ver);
3117
3118 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3119 dev_dbg(rdev_to_dev(rdev), "ABI version is different from the device's %d",
3120 BNXT_RE_ABI_VERSION);
3121 return ERR_PTR(-EPERM);
3122 }
3123
3124 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3125 if (!uctx)
3126 return ERR_PTR(-ENOMEM);
3127
3128 uctx->rdev = rdev;
3129
3130 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3131 if (!uctx->shpg) {
3132 rc = -ENOMEM;
3133 goto fail;
3134 }
3135 spin_lock_init(&uctx->sh_lock);
3136
3137 resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3138 resp.max_qp = rdev->qplib_ctx.qpc_count;
3139 resp.pg_size = PAGE_SIZE;
3140 resp.cqe_sz = sizeof(struct cq_base);
3141 resp.max_cqd = dev_attr->max_cq_wqes;
3142 resp.rsvd = 0;
3143
3144 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3145 if (rc) {
3146 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3147 rc = -EFAULT;
3148 goto cfail;
3149 }
3150
3151 return &uctx->ib_uctx;
3152cfail:
3153 free_page((unsigned long)uctx->shpg);
3154 uctx->shpg = NULL;
3155fail:
3156 kfree(uctx);
3157 return ERR_PTR(rc);
3158}
3159
3160int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3161{
3162 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3163 struct bnxt_re_ucontext,
3164 ib_uctx);
3165 if (uctx->shpg)
3166 free_page((unsigned long)uctx->shpg);
3167 kfree(uctx);
3168 return 0;
3169}
3170
3171/* Helper function to mmap the virtual memory from user app */
3172int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3173{
3174 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3175 struct bnxt_re_ucontext,
3176 ib_uctx);
3177 struct bnxt_re_dev *rdev = uctx->rdev;
3178 u64 pfn;
3179
3180 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3181 return -EINVAL;
3182
3183 if (vma->vm_pgoff) {
3184 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3185 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3186 PAGE_SIZE, vma->vm_page_prot)) {
3187 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3188 return -EAGAIN;
3189 }
3190 } else {
3191 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3192 if (remap_pfn_range(vma, vma->vm_start,
3193 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3194 dev_err(rdev_to_dev(rdev),
3195 "Failed to map shared page");
3196 return -EAGAIN;
3197 }
3198 }
3199
3200 return 0;
3201}
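
/*
 * Illustrative userspace counterpart, assuming a libbnxt_re-style
 * provider (hypothetical): page offset 0 maps the shared page and a
 * non-zero page offset maps the (non-cached) doorbell page, one page at
 * a time, over the uverbs context fd:
 *
 *	void *shpg = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, ctx->cmd_fd, 0);
 */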