Parav Pandit | fe2caef | 2012-03-21 04:09:06 +0530 | [diff] [blame] | 1 | /******************************************************************* |
| 2 | * This file is part of the Emulex RoCE Device Driver for * |
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * |
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * |
| 5 | * EMULEX and SLI are trademarks of Emulex. * |
| 6 | * www.emulex.com * |
| 7 | * * |
| 8 | * This program is free software; you can redistribute it and/or * |
| 9 | * modify it under the terms of version 2 of the GNU General * |
| 10 | * Public License as published by the Free Software Foundation. * |
| 11 | * This program is distributed in the hope that it will be useful. * |
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * |
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * |
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * |
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * |
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * |
| 17 | * more details, a copy of which can be found in the file COPYING * |
| 18 | * included with this package. * |
| 19 | * |
| 20 | * Contact Information: |
| 21 | * linux-drivers@emulex.com |
| 22 | * |
| 23 | * Emulex |
| 24 | * 3333 Susan Street |
| 25 | * Costa Mesa, CA 92626 |
| 26 | *******************************************************************/ |
| 27 | |
| 28 | #ifndef __OCRDMA_H__ |
| 29 | #define __OCRDMA_H__ |
| 30 | |
| 31 | #include <linux/mutex.h> |
| 32 | #include <linux/list.h> |
| 33 | #include <linux/spinlock.h> |
| 34 | #include <linux/pci.h> |
| 35 | |
| 36 | #include <rdma/ib_verbs.h> |
| 37 | #include <rdma/ib_user_verbs.h> |
Devesh Sharma | fad51b7 | 2014-02-04 11:57:10 +0530 | [diff] [blame] | 38 | #include <rdma/ib_addr.h> |
Parav Pandit | fe2caef | 2012-03-21 04:09:06 +0530 | [diff] [blame] | 39 | |
| 40 | #include <be_roce.h> |
| 41 | #include "ocrdma_sli.h" |
| 42 | |
Devesh Sharma | 0154410 | 2014-02-04 11:57:00 +0530 | [diff] [blame] | 43 | #define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u" |
| 44 | |
| 45 | #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" |
Parav Pandit | fe2caef | 2012-03-21 04:09:06 +0530 | [diff] [blame] | 46 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" |
| 47 | |
Selvin Xavier | a51f06e | 2014-02-04 11:57:07 +0530 | [diff] [blame] | 48 | #define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)" |
| 49 | #define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)" |
| 50 | |
| 51 | #define OC_SKH_DEVICE_PF 0x720 |
| 52 | #define OC_SKH_DEVICE_VF 0x728 |
Parav Pandit | fe2caef | 2012-03-21 04:09:06 +0530 | [diff] [blame] | 53 | #define OCRDMA_MAX_AH 512 |
| 54 | |
| 55 | #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) |
| 56 | |
/* Combine two 32-bit halves into a u64.  Arguments are parenthesized so
 * that expression arguments (e.g. "a + b") expand correctly.
 */
#define convert_to_64bit(lo, hi) (((u64)(hi) << 32) | (u64)(lo))
| 58 | |
/* Adapter limits and capabilities -- presumably reported by firmware at
 * probe time (populated outside this header; confirm against the mailbox
 * query code) and used to bound resource allocation and to answer
 * device-attribute queries from the IB core.
 */
struct ocrdma_dev_attr {
	u8 fw_ver[32];		/* firmware version string */
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u16 max_srq;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_rdma_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_mw;
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;
	u16 max_ird_per_qp;

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;

	u32 wqe_size;
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};
| 96 | |
/* A DMA buffer: kernel virtual address, device (bus) address and size. */
struct ocrdma_dma_mem {
	void *va;
	dma_addr_t pa;
	u32 size;
};
| 102 | |
/* One page of a physical buffer list (PBL): CPU and DMA address. */
struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};
| 107 | |
/* Generic descriptor for a DMA-mapped hardware queue; used for the EQ
 * (ocrdma_eq) and the mailbox SQ/CQ (ocrdma_mq) below.
 */
struct ocrdma_queue_info {
	void *va;
	dma_addr_t dma;
	u32 size;
	u16 len;
	u16 entry_size;		/* Size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;
	bool created;		/* queue has been created in hardware */
};
| 118 | |
/* Event queue and its interrupt bookkeeping. */
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;		/* interrupt vector assigned to this EQ */
	int cq_cnt;		/* number of CQs attached to this EQ */
	struct ocrdma_dev *dev;
	char irq_name[32];	/* name registered for the IRQ handler */
};
| 126 | |
/* Mailbox queue pair: command send queue plus its completion queue. */
struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;		/* re-arm the MQ CQ after processing */
};
| 132 | |
/* Context for the single outstanding mailbox (MQE) command: "lock"
 * serializes issuers; cmd_wait/cmd_done pair a submission with its
 * completion; *_status carry the completion result.
 */
struct mqe_ctx {
	struct mutex lock; /* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;
	u32 tag;		/* tag matching a response to its request */
	u16 cqe_status;
	u16 ext_status;
	bool cmd_done;
	bool fw_error_state;	/* firmware reported an error condition */
};
| 142 | |
/* Hardware state of a memory region: access permissions, length, and
 * the physical buffer list (PBL) pages that describe its memory.
 */
struct ocrdma_hw_mr {
	u32 lkey;
	u8 fr_mr;		/* fast-register MR flag */
	u8 remote_atomic;	/* access flags, one per u8 */
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;
	struct ocrdma_pbl *pbl_table;
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;		/* presumably "first byte offset" -- confirm */
	u64 va;
};
| 162 | |
/* Memory region: IB core object, its pinned user pages, and hw state. */
struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;	/* NULL for non-user (DMA/FRMR) regions? confirm */
	struct ocrdma_hw_mr hwmr;
};
| 168 | |
/* Cookie handed to a stats consumer (debugfs -- see ocrdma_dev below):
 * which counter set ("type") on which device.
 */
struct ocrdma_stats {
	u8 type;
	struct ocrdma_dev *dev;
};
| 173 | |
/* DMA buffer used to fetch statistics from the adapter via a mailbox
 * command, plus a staging buffer for debugfs text output.
 */
struct stats_mem {
	struct ocrdma_mqe mqe;	/* mailbox command used for the query */
	void *va;
	dma_addr_t pa;
	u32 size;
	char *debugfs_mem;	/* formatted output returned to debugfs */
};
| 181 | |
/* PHY capabilities/identity as reported by the adapter. */
struct phy_info {
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	u16 phy_type;
	u16 interface_type;
};
| 188 | |
/* Per-adapter state: the ib_device registered with the IB core, id ->
 * object lookup tables, EQ/mailbox machinery, the shared AH pool, and
 * debugfs statistics plumbing.
 */
struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock; /* provides syncronise access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;	/* presumably indexed by CQ id */
	struct ocrdma_qp **qp_tbl;	/* presumably indexed by QP id */

	struct ocrdma_eq *eq_tbl;	/* array of eq_cnt event queues */
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provided synchronization to sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	/* device-wide pool of address-handle (AV) entries */
	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* provide synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;
	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;	/* handle shared with the be2net NIC driver */
	struct phy_info phy;
	char model_number[32];
	u32 hba_port_num;

	struct list_head entry;
	struct rcu_head rcu;
	int id;
	u64 *stag_arr;
	u8 sl; /* service level */
	bool pfc_state;
	atomic_t update_sl;
	u16 pvid;
	u32 asic_id;	/* lazily read/cached by ocrdma_get_asic_type() */

	ulong last_stats_time;
	struct mutex stats_lock; /* provide synch for debugfs operations */
	struct stats_mem stats_mem;
	struct ocrdma_stats rsrc_stats;
	struct ocrdma_stats rx_stats;
	struct ocrdma_stats wqe_stats;
	struct ocrdma_stats tx_stats;
	struct ocrdma_stats db_err_stats;
	struct ocrdma_stats tx_qp_err_stats;
	struct ocrdma_stats rx_qp_err_stats;
	struct ocrdma_stats tx_dbg_stats;
	struct ocrdma_stats rx_dbg_stats;
	struct dentry *dir;	/* debugfs directory for this device */
};
| 260 | |
/* Completion queue: the CQE ring shared with hardware plus the driver's
 * polling and arming state.
 */
struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_cqe *va;	/* CQE ring */
	u32 phase;		/* expected valid-phase value; see is_cqe_valid() */
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wrap arounds
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool deferred_arm, deferred_sol;
	bool first_arm;

	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
						   * to cq polling
						   */
	/* syncronizes cq completion handler invoked from multiple context */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;
	u16 eqn;		/* EQ this CQ reports to */

	struct ocrdma_ucontext *ucontext;	/* owner, for user-mapped CQs */
	dma_addr_t pa;
	u32 len;
	u32 cqe_cnt;

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};
| 292 | |
/* Protection domain, with its DPP (doorbell push) QP accounting. */
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_ucontext *uctx;	/* owning user context, if user-allocated */
	u32 id;
	int num_dpp_qp;
	u32 dpp_page;
	bool dpp_enabled;
};
| 301 | |
/* Address handle -- presumably backed by one AV entry from dev->av_tbl. */
struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_av *av;
	u16 sgid_index;
	u32 id;
};
| 308 | |
/* State of one hardware work-queue ring (used for SQ, RQ and SRQ). */
struct ocrdma_qp_hwq_info {
	u8 *va; /* virtual address */
	u32 max_sges;
	u32 head, tail;		/* producer/consumer ring indices */
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u16 dbid; /* qid, where to ring the doorbell. */
	u32 len;
	dma_addr_t pa;		/* DMA address of the ring memory */
};
| 320 | |
/* Shared receive queue. */
struct ocrdma_srq {
	struct ib_srq ibsrq;
	u8 __iomem *db;		/* doorbell register mapping */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;	/* wr_id saved per posted RQE slot */
	u32 *idx_bit_fields;	/* presumably a bitmap of RQE slot usage -- confirm */
	u32 bit_fields_len;

	/* provide synchronization to multiple context(s) posting rqe */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
	u32 id;
};
| 335 | |
/* Queue pair: send/receive hardware rings, their doorbells, and the
 * bookkeeping needed while polling the attached CQs.
 */
struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;	/* SQ doorbell register mapping */
	struct ocrdma_qp_hwq_info sq;
	/* per-SQ-WQE bookkeeping -- presumably indexed by WQE slot */
	struct {
		uint64_t wrid;
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t signaled;
		uint8_t rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;

	/* provide synchronization to multiple context(s) posting wqe, rqe */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;	/* RQ doorbell register mapping */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;	/* non-NULL when attached to an SRQ */
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state; /* QP state */
	int cap_flags;
	u32 max_ord, max_ird;

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
	bool signaled;	/* presumably sq_sig_all (every WR completes) -- confirm */
};
| 380 | |
/* Per-process user-verbs context. */
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;

	struct list_head mm_head;	/* list of ocrdma_mm entries */
	struct mutex mm_list_lock; /* protects list entries of mm type */
	struct ocrdma_pd *cntxt_pd;	/* per-context PD */
	int pd_in_use;

	/* AH table shared with this context -- presumably mmap'ed to user
	 * space; confirm against the ucontext allocation code.
	 */
	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};
| 395 | |
/* One region registered for user mapping, keyed by (phy_addr, len);
 * linked on ocrdma_ucontext.mm_head.
 */
struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};
| 403 | |
/* container_of accessor: ib_device -> enclosing ocrdma_dev. */
static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}
| 408 | |
/* container_of accessor: ib_ucontext -> enclosing ocrdma_ucontext. */
static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}
| 414 | |
/* container_of accessor: ib_pd -> enclosing ocrdma_pd. */
static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}
| 419 | |
/* container_of accessor: ib_cq -> enclosing ocrdma_cq. */
static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}
| 424 | |
/* container_of accessor: ib_qp -> enclosing ocrdma_qp. */
static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}
| 429 | |
/* container_of accessor: ib_mr -> enclosing ocrdma_mr. */
static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}
| 434 | |
/* container_of accessor: ib_ah -> enclosing ocrdma_ah. */
static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}
| 439 | |
/* container_of accessor: ib_srq -> enclosing ocrdma_srq. */
static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
| 444 | |
Naresh Gottumukkala | df176ea | 2013-06-10 04:42:41 +0000 | [diff] [blame] | 445 | static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) |
| 446 | { |
| 447 | int cqe_valid; |
| 448 | cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; |
Naresh Gottumukkala | f99b164 | 2013-08-07 12:52:32 +0530 | [diff] [blame] | 449 | return (cqe_valid == cq->phase); |
Naresh Gottumukkala | df176ea | 2013-06-10 04:42:41 +0000 | [diff] [blame] | 450 | } |
| 451 | |
| 452 | static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) |
| 453 | { |
| 454 | return (le32_to_cpu(cqe->flags_status_srcqpn) & |
| 455 | OCRDMA_CQE_QTYPE) ? 0 : 1; |
| 456 | } |
| 457 | |
| 458 | static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) |
| 459 | { |
| 460 | return (le32_to_cpu(cqe->flags_status_srcqpn) & |
| 461 | OCRDMA_CQE_INVALIDATE) ? 1 : 0; |
| 462 | } |
| 463 | |
| 464 | static inline int is_cqe_imm(struct ocrdma_cqe *cqe) |
| 465 | { |
| 466 | return (le32_to_cpu(cqe->flags_status_srcqpn) & |
| 467 | OCRDMA_CQE_IMM) ? 1 : 0; |
| 468 | } |
| 469 | |
| 470 | static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) |
| 471 | { |
| 472 | return (le32_to_cpu(cqe->flags_status_srcqpn) & |
| 473 | OCRDMA_CQE_WRITE_IMM) ? 1 : 0; |
| 474 | } |
| 475 | |
Moni Shoua | 40aca6f | 2013-12-12 18:03:15 +0200 | [diff] [blame] | 476 | static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev, |
| 477 | struct ib_ah_attr *ah_attr, u8 *mac_addr) |
| 478 | { |
| 479 | struct in6_addr in6; |
| 480 | |
| 481 | memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); |
| 482 | if (rdma_is_multicast_addr(&in6)) |
| 483 | rdma_get_mcast_mac(&in6, mac_addr); |
| 484 | else |
| 485 | memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); |
| 486 | return 0; |
| 487 | } |
Naresh Gottumukkala | df176ea | 2013-06-10 04:42:41 +0000 | [diff] [blame] | 488 | |
Selvin Xavier | a51f06e | 2014-02-04 11:57:07 +0530 | [diff] [blame] | 489 | static inline char *hca_name(struct ocrdma_dev *dev) |
| 490 | { |
| 491 | switch (dev->nic_info.pdev->device) { |
| 492 | case OC_SKH_DEVICE_PF: |
| 493 | case OC_SKH_DEVICE_VF: |
| 494 | return OC_NAME_SH; |
| 495 | default: |
| 496 | return OC_NAME_UNKNOWN; |
| 497 | } |
| 498 | } |
| 499 | |
Devesh Sharma | ea61762 | 2014-02-04 11:56:54 +0530 | [diff] [blame] | 500 | static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev, |
| 501 | int eqid) |
| 502 | { |
| 503 | int indx; |
| 504 | |
| 505 | for (indx = 0; indx < dev->eq_cnt; indx++) { |
| 506 | if (dev->eq_tbl[indx].q.id == eqid) |
| 507 | return indx; |
| 508 | } |
| 509 | |
| 510 | return -EINVAL; |
| 511 | } |
| 512 | |
Devesh Sharma | 21c3391 | 2014-02-04 11:56:56 +0530 | [diff] [blame] | 513 | static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) |
| 514 | { |
| 515 | if (dev->nic_info.dev_family == 0xF && !dev->asic_id) { |
| 516 | pci_read_config_dword( |
| 517 | dev->nic_info.pdev, |
| 518 | OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id); |
| 519 | } |
| 520 | |
| 521 | return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >> |
| 522 | OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; |
| 523 | } |
| 524 | |
Selvin Xavier | 31dbdd9 | 2014-06-10 19:32:13 +0530 | [diff] [blame] | 525 | static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio) |
| 526 | { |
| 527 | return *(pfc + prio); |
| 528 | } |
| 529 | |
| 530 | static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio) |
| 531 | { |
| 532 | return *(app_prio + prio); |
| 533 | } |
| 534 | |
| 535 | static inline u8 ocrdma_is_enabled_and_synced(u32 state) |
| 536 | { /* May also be used to interpret TC-state, QCN-state |
| 537 | * Appl-state and Logical-link-state in future. |
| 538 | */ |
| 539 | return (state & OCRDMA_STATE_FLAG_ENABLED) && |
| 540 | (state & OCRDMA_STATE_FLAG_SYNC); |
| 541 | } |
| 542 | |
Parav Pandit | fe2caef | 2012-03-21 04:09:06 +0530 | [diff] [blame] | 543 | #endif |