/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

struct bnxt_qplib_sge {
        u64 addr;
        u32 lkey;
        u32 size;
};

#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE sizeof(struct sq_send)

#define SQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
#define SQE_MAX_IDX_PER_PG (SQE_CNT_PER_PG - 1)

static inline u32 get_sqe_pg(u32 val)
{
        return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
}

static inline u32 get_sqe_idx(u32 val)
{
        return (val & SQE_MAX_IDX_PER_PG);
}
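
/*
 * The work queues are laid out as arrays of page-sized chunks, so a flat
 * producer/consumer index is split into a page number (upper bits) and a
 * slot within that page (lower bits).  Worked example, assuming 4 KiB pages
 * and a 128-byte struct sq_send: SQE_CNT_PER_PG = 32 and
 * SQE_MAX_IDX_PER_PG = 31, so a flat index of 70 maps to page
 * get_sqe_pg(70) = 2, slot get_sqe_idx(70) = 6.  The PSN-search, RQE, CQE
 * and NQE macros below follow the same pattern.
 */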

#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE sizeof(struct sq_psn_search)

#define PSNE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
#define PSNE_MAX_IDX_PER_PG (PSNE_CNT_PER_PG - 1)

static inline u32 get_psne_pg(u32 val)
{
        return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
}

static inline u32 get_psne_idx(u32 val)
{
        return (val & PSNE_MAX_IDX_PER_PG);
}

#define BNXT_QPLIB_QP_MAX_SGL 6

struct bnxt_qplib_swq {
        u64 wr_id;
        u8 type;
        u8 flags;
        u32 start_psn;
        u32 next_psn;
        struct sq_psn_search *psn_search;
};

struct bnxt_qplib_swqe {
        /* General */
        u64 wr_id;
        u8 reqs_type;
        u8 type;
#define BNXT_QPLIB_SWQE_TYPE_SEND 0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM 1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV 2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE 4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM 5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ 6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP 8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD 11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV 12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR 13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR 13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW 14
#define BNXT_QPLIB_SWQE_TYPE_RECV 128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM 129
        u8 flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
        struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
        int num_sge;
        /* Max inline data is 96 bytes */
        u32 inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH 96
        u8 inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

        union {
                /* Send, with imm, inval key */
                struct {
                        union {
                                __be32 imm_data;
                                u32 inv_key;
                        };
                        u32 q_key;
                        u32 dst_qp;
                        u16 avid;
                } send;

                /* Send Raw Ethernet and QP1 */
                struct {
                        u16 lflags;
                        u16 cfa_action;
                        u32 cfa_meta;
                } rawqp1;

                /* RDMA write, with imm, read */
                struct {
                        union {
                                __be32 imm_data;
                                u32 inv_key;
                        };
                        u64 remote_va;
                        u32 r_key;
                } rdma;

                /* Atomic cmp/swap, fetch/add */
                struct {
                        u64 remote_va;
                        u32 r_key;
                        u64 swap_data;
                        u64 cmp_data;
                } atomic;

                /* Local Invalidate */
                struct {
                        u32 inv_l_key;
                } local_inv;

                /* FR-PMR */
                struct {
                        u8 access_cntl;
                        u8 pg_sz_log;
                        bool zero_based;
                        u32 l_key;
                        u32 length;
                        u8 pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K 0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K 1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K 4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K 6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M 8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M 9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M 10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G 18
                        u8 levels;
#define PAGE_SHIFT_4K 12
                        __le64 *pbl_ptr;
                        dma_addr_t pbl_dma_ptr;
                        u64 *page_list;
                        u16 page_list_len;
                        u64 va;
                } frmr;

                /* Bind */
                struct {
                        u8 access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND BIT(4)
                        bool zero_based;
                        u8 mw_type;
                        u32 parent_l_key;
                        u32 r_key;
                        u64 va;
                        u32 length;
                } bind;
        };
};
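
/*
 * Usage sketch (illustrative only; "qp" and the scalar locals "wr_id", "dma",
 * "lkey", "len", "remote_va" and "rkey" are hypothetical caller state): an
 * RDMA WRITE work request is described by filling in a bnxt_qplib_swqe and
 * handing it to the post-send helpers declared at the bottom of this header.
 *
 *      struct bnxt_qplib_swqe wqe = {};
 *
 *      wqe.type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
 *      wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 *      wqe.wr_id = wr_id;
 *      wqe.sg_list[0].addr = dma;
 *      wqe.sg_list[0].lkey = lkey;
 *      wqe.sg_list[0].size = len;
 *      wqe.num_sge = 1;
 *      wqe.rdma.remote_va = remote_va;
 *      wqe.rdma.r_key = rkey;
 *
 *      rc = bnxt_qplib_post_send(qp, &wqe);
 *      if (!rc)
 *              bnxt_qplib_post_send_db(qp);
 */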

#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE sizeof(struct rq_wqe)

#define RQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
#define RQE_MAX_IDX_PER_PG (RQE_CNT_PER_PG - 1)
#define RQE_PG(x) (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
#define RQE_IDX(x) ((x) & RQE_MAX_IDX_PER_PG)

struct bnxt_qplib_q {
        struct bnxt_qplib_hwq hwq;
        struct bnxt_qplib_swq *swq;
        struct scatterlist *sglist;
        u32 nmap;
        u32 max_wqe;
        u16 max_sge;
        u32 psn;
        bool flush_in_progress;
};

struct bnxt_qplib_qp {
        struct bnxt_qplib_pd *pd;
        struct bnxt_qplib_dpi *dpi;
        u64 qp_handle;
        u32 id;
        u8 type;
        u8 sig_type;
        u32 modify_flags;
        u8 state;
        u8 cur_qp_state;
        u32 max_inline_data;
        u32 mtu;
        u8 path_mtu;
        bool en_sqd_async_notify;
        u16 pkey_index;
        u32 qkey;
        u32 dest_qp_id;
        u8 access;
        u8 timeout;
        u8 retry_cnt;
        u8 rnr_retry;
        u32 min_rnr_timer;
        u32 max_rd_atomic;
        u32 max_dest_rd_atomic;
        u32 dest_qpn;
        u8 smac[6];
        u16 vlan_id;
        u8 nw_type;
        struct bnxt_qplib_ah ah;

#define BTH_PSN_MASK ((1 << 24) - 1)
        /* SQ */
        struct bnxt_qplib_q sq;
        /* RQ */
        struct bnxt_qplib_q rq;
        /* SRQ */
        struct bnxt_qplib_srq *srq;
        /* CQ */
        struct bnxt_qplib_cq *scq;
        struct bnxt_qplib_cq *rcq;
        /* IRRQ and ORRQ */
        struct bnxt_qplib_hwq irrq;
        struct bnxt_qplib_hwq orrq;
        /* Header buffer for QP1 */
        int sq_hdr_buf_size;
        int rq_hdr_buf_size;
        /*
         * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
         * and ib_bth + ib_deth (20).
         * Max required is 82 when RoCE V2 is enabled
         */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86
        /* Ethernet header = 14 */
        /* ib_grh = 40 (provided by MAD) */
        /* ib_bth + ib_deth = 20 */
        /* MAD = 256 (provided by MAD) */
        /* iCRC = 4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20
        void *sq_hdr_buf;
        dma_addr_t sq_hdr_buf_map;
        void *rq_hdr_buf;
        dma_addr_t rq_hdr_buf_map;
};
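
/*
 * Sizing sketch for the QP1 header buffers (an assumed consumer pattern, not
 * something this header enforces): the caller picks the buffer sizes from the
 * limits above before creating the special QP.
 *
 *      qp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
 *      qp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
 *      rc = bnxt_qplib_create_qp1(res, qp);
 */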

#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)

#define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG (CQE_CNT_PER_PG - 1)
#define CQE_PG(x) (((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)

#define ROCE_CQE_CMP_V 0
#define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \
        (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
         !((raw_cons) & (cp_bit)))
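
/*
 * CQE_CMP_VALID() implements the usual toggle-bit scheme: the hardware flips
 * CQ_BASE_TOGGLE in cqe_type_toggle on every wrap of the ring, so an entry is
 * valid only while its toggle bit differs from the wrap parity of the raw
 * consumer index, (raw_cons) & (cp_bit).  Poll-loop sketch (hypothetical
 * locals; the real loop lives in the fast-path implementation):
 *
 *      hdr = <CQE selected via CQE_PG()/CQE_IDX() from the masked raw_cons>;
 *      if (!CQE_CMP_VALID(hdr, raw_cons, cp_bit))
 *              break;
 *      dma_rmb();
 *      ... consume the CQE, then advance raw_cons ...
 *
 * The dma_rmb() matters: the rest of the entry should only be read after the
 * valid bit has been observed.
 */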

struct bnxt_qplib_cqe {
        u8 status;
        u8 type;
        u8 opcode;
        u32 length;
        u64 wr_id;
        union {
                __be32 immdata;
                u32 invrkey;
        };
        u64 qp_handle;
        u64 mr_handle;
        u16 flags;
        u8 smac[6];
        u32 src_qp;
        u16 raweth_qp1_flags;
        u16 raweth_qp1_errors;
        u16 raweth_qp1_cfa_code;
        u32 raweth_qp1_flags2;
        u32 raweth_qp1_metadata;
        u8 raweth_qp1_payload_offset;
        u16 pkey_index;
};

#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
struct bnxt_qplib_cq {
        struct bnxt_qplib_dpi *dpi;
        void __iomem *dbr_base;
        u32 max_wqe;
        u32 id;
        u16 count;
        u16 period;
        struct bnxt_qplib_hwq hwq;
        u32 cnq_hw_ring_id;
        bool resize_in_progress;
        struct scatterlist *sghead;
        u32 nmap;
        u64 cq_handle;

#define CQ_RESIZE_WAIT_TIME_MS 500
        unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1
        wait_queue_head_t waitq;
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x) (2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s) (((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x) ((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s) ((s) - 1)
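
/*
 * Worked example for the conversions above: an IRD limit of 8 needs
 * IRD_LIMIT_TO_IRRQ_SLOTS(8) = 2 * 8 + 2 = 18 IRRQ slots, and
 * IRRQ_SLOTS_TO_IRD_LIMIT(18) = (18 >> 1) - 1 = 8 recovers the limit.
 * Likewise, an ORD limit of 8 takes ORD_LIMIT_TO_ORRQ_SLOTS(8) = 9 slots.
 */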

#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE sizeof(struct nq_base)

#define NQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG (NQE_CNT_PER_PG - 1)
#define NQE_PG(x) (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x) ((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, raw_cons, cp_bit) \
        (!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \
         !((raw_cons) & (cp_bit)))

#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)

#define NQ_CONS_PCI_BAR_REGION 2
#define NQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM (NQ_DB_KEY_CP | \
                              NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS (NQ_DB_KEY_CP | \
                        NQ_DB_IDX_VALID | \
                        NQ_DB_IRQ_DIS)
#define NQ_DB_REARM(db, raw_cons, cp_bit) \
        writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
#define NQ_DB(db, raw_cons, cp_bit) \
        writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
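
/*
 * Doorbell usage sketch (illustrative; assumes the bnxt_qplib_nq fields
 * defined below and the hwq bookkeeping from qplib_res.h): while entries are
 * being processed the NQ doorbell is written with the interrupt masked
 * (NQ_DB includes NQ_DB_IRQ_DIS), and once the budget is exhausted the queue
 * is re-armed.
 *
 *      NQ_DB(nq->bar_reg_iomem, raw_cons, nq->hwq.max_elements);
 *      ...
 *      NQ_DB_REARM(nq->bar_reg_iomem, raw_cons, nq->hwq.max_elements);
 */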

struct bnxt_qplib_nq {
        struct pci_dev *pdev;

        int vector;
        int budget;
        bool requested;
        struct tasklet_struct worker;
        struct bnxt_qplib_hwq hwq;

        u16 bar_reg;
        u16 bar_reg_off;
        u16 ring_id;
        void __iomem *bar_reg_iomem;

        int (*cqn_handler)(struct bnxt_qplib_nq *nq,
                           struct bnxt_qplib_cq *cq);
        int (*srqn_handler)(struct bnxt_qplib_nq *nq,
                            void *srq,
                            u8 event);
};

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
                         int msix_vector, int bar_reg_offset,
                         int (*cqn_handler)(struct bnxt_qplib_nq *nq,
                                            struct bnxt_qplib_cq *cq),
                         int (*srqn_handler)(struct bnxt_qplib_nq *nq,
                                             void *srq,
                                             u8 event));
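/*
 * Registration sketch (hypothetical handler names): the consumer supplies its
 * CQ and SRQ notification callbacks when enabling the NQ.
 *
 *      static int my_cqn_handler(struct bnxt_qplib_nq *nq,
 *                                struct bnxt_qplib_cq *cq)
 *      {
 *              ... schedule polling of "cq" ...
 *              return 0;
 *      }
 *
 *      rc = bnxt_qplib_enable_nq(pdev, nq, msix_vector, bar_reg_offset,
 *                                my_cqn_handler, my_srqn_handler);
 */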
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
                                            u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                       int num);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
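/*
 * Completion-harvest sketch (illustrative; "cq", "cqes" and "arm_type" are
 * caller state, with arm_type taken from the hardware interface header):
 * completions are drained into an array of bnxt_qplib_cqe and the CQ is then
 * re-armed for the next notification.
 *
 *      struct bnxt_qplib_cqe cqes[16];
 *      int polled;
 *
 *      polled = bnxt_qplib_poll_cq(cq, cqes, ARRAY_SIZE(cqes));
 *      ... translate the "polled" entries, then ...
 *      bnxt_qplib_req_notify_cq(cq, arm_type);
 */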
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
#endif /* __BNXT_QPLIB_FP_H__ */