/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter (header)
 */
38
#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__

/*
 * Per-GID-entry context handed back through the add_gid()/del_gid()
 * "context" cookie (see bnxt_re_add_gid()/bnxt_re_del_gid() below).
 */
struct bnxt_re_gid_ctx {
	u32			idx;	/* GID table index of this entry */
	u32			refcnt;	/* references held on this entry */
};

/* Size of the local buffer the fence MR/MW is registered over */
#define BNXT_RE_FENCE_BYTES	64
/*
 * Per-PD fence resources: a small DMA-mapped buffer with an MR and a
 * memory window over it, plus a pre-built bind WQE.
 * NOTE(review): presumably used to emulate fence semantics by posting
 * the bind WQE — confirm against the implementation in ib_verbs.c.
 */
struct bnxt_re_fence_data {
	u32			size;		/* mapped size of va[] */
	u8			va[BNXT_RE_FENCE_BYTES];	/* fence buffer */
	dma_addr_t		dma_addr;	/* DMA address of va[] */
	struct bnxt_re_mr	*mr;		/* MR covering the buffer */
	struct ib_mw		*mw;		/* MW bound over the MR */
	struct bnxt_qplib_swqe	bind_wqe;	/* prototype bind work request */
	u32			bind_rkey;	/* rkey produced by the bind */
};

/* Protection domain: ib_pd wrapper around the qplib PD, plus per-PD fence state */
struct bnxt_re_pd {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_pd		ib_pd;		/* core verbs PD (container_of anchor) */
	struct bnxt_qplib_pd	qplib_pd;	/* HW/qplib-layer PD */
	struct bnxt_re_fence_data fence;	/* fence MR/MW resources for this PD */
};

/* Address handle: ib_ah wrapper around the qplib AH */
struct bnxt_re_ah {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_ah		ib_ah;		/* core verbs AH */
	struct bnxt_qplib_ah	qplib_ah;	/* HW/qplib-layer AH */
};

/* Shared receive queue */
struct bnxt_re_srq {
	struct bnxt_re_dev	*rdev;		/* owning device */
	u32			srq_limit;	/* SRQ limit value (low-watermark arming) */
	struct ib_srq		ib_srq;		/* core verbs SRQ */
	struct bnxt_qplib_srq	qplib_srq;	/* HW/qplib-layer SRQ */
	struct ib_umem		*umem;		/* user memory backing (userspace SRQs) */
	spinlock_t		lock;		/* protect srq */
};

/* Queue pair */
struct bnxt_re_qp {
	struct list_head	list;		/* linkage on the device's QP list */
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_qp		ib_qp;		/* core verbs QP */
	spinlock_t		sq_lock;	/* protect sq */
	spinlock_t		rq_lock;	/* protect rq */
	struct bnxt_qplib_qp	qplib_qp;	/* HW/qplib-layer QP */
	struct ib_umem		*sumem;		/* user memory for the SQ (user QPs) */
	struct ib_umem		*rumem;		/* user memory for the RQ (user QPs) */
	/* QP1 (GSI) support */
	u32			send_psn;	/* next PSN for QP1 sends */
	struct ib_ud_header	qp1_hdr;	/* scratch UD header for QP1 traffic */
	struct bnxt_re_cq	*scq;		/* send completion queue */
	struct bnxt_re_cq	*rcq;		/* receive completion queue */
};

/* Completion queue */
struct bnxt_re_cq {
	struct bnxt_re_dev	*rdev;		/* owning device */
	spinlock_t		cq_lock;	/* protect cq */
	u16			cq_count;	/* coalescing count — TODO confirm units */
	u16			cq_period;	/* coalescing period — TODO confirm units */
	struct ib_cq		ib_cq;		/* core verbs CQ */
	struct bnxt_qplib_cq	qplib_cq;	/* HW/qplib-layer CQ */
	struct bnxt_qplib_cqe	*cql;		/* CQE scratch array used while polling */
/* Upper bound on CQEs fetched in one poll pass (sizes cql[]) */
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;	/* allocated length of cql[] */
	struct ib_umem		*umem;		/* user memory backing (userspace CQs) */
};

/* Memory region (DMA, user-registered, or fast-reg) */
struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mr		ib_mr;		/* core verbs MR */
	struct ib_umem		*ib_umem;	/* pinned user memory (reg_user_mr) */
	struct bnxt_qplib_mrw	qplib_mr;	/* HW/qplib-layer MR/MW object */
	u32			npages;		/* entries filled in pages[] */
	u64			*pages;		/* page address list for map_mr_sg */
	struct bnxt_qplib_frpl	qplib_frpl;	/* fast-reg page list (alloc_mr) */
};

/* Standalone fast-register page list */
struct bnxt_re_frpl {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct bnxt_qplib_frpl	qplib_frpl;	/* HW/qplib-layer page list */
	u64			*page_list;	/* page addresses */
};

/* Fast memory region (legacy FMR interface) */
struct bnxt_re_fmr {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_fmr		ib_fmr;		/* core verbs FMR */
	struct bnxt_qplib_mrw	qplib_fmr;	/* HW/qplib-layer MR/MW object */
};

/* Memory window */
struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_mw		ib_mw;		/* core verbs MW */
	struct bnxt_qplib_mrw	qplib_mw;	/* HW/qplib-layer MR/MW object */
};

/* Per-process user context */
struct bnxt_re_ucontext {
	struct bnxt_re_dev	*rdev;		/* owning device */
	struct ib_ucontext	ib_uctx;	/* core verbs ucontext */
	struct bnxt_qplib_dpi	dpi;		/* doorbell page allocated for this process */
	void			*shpg;		/* shared page — presumably exposed via bnxt_re_mmap(); confirm */
	spinlock_t		sh_lock;	/* protect shpg */
};

/*
 * Verb entry points provided by this driver and wired into the
 * ib_device callback table by the main driver module.
 */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num);

/* Device and port queries */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable);
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey);

/* GID table management */
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context);
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num);

/* Protection domains */
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd);

/* Address handles */
struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
				struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);

/* Shared receive queues */
struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq);
int bnxt_re_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
			  struct ib_recv_wr **bad_recv_wr);

/* Queue pairs and work-request posting */
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp);
int bnxt_re_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
		      struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
		      struct ib_recv_wr **bad_recv_wr);

/* Completion queues */
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);

/* Memory regions and memory windows */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);

/* User context and doorbell/shared-page mmap */
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata);
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/*
 * Lock/unlock both CQs attached to a QP.  bnxt_re_lock_cqs() returns
 * saved flags that the caller must pass back to bnxt_re_unlock_cqs()
 * (presumably irqsave flags — confirm in the implementation).
 */
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
#endif /* __BNXT_RE_IB_VERBS_H__ */