/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION	"8.7.1.20"

#define MAX_HWFNS_PER_DEVICE	(4)
#define NAME_SIZE		16
#define VER_SIZE		16

#define QED_WFQ_UNIT		100

/* cau states */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

struct qed_eth_cb_ops;
struct qed_dev_info;

/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}
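
/* Usage sketch (illustrative): the offset returned by qed_db_addr() is
 * relative to the doorbell BAR and is typically written with the DOORBELL()
 * macro defined later in this file. 'cid' and 'db_data' are hypothetical
 * placeholders; DQ_DEMS_LEGACY comes from qed_hsi.h.
 *
 *	u32 db_off = qed_db_addr(cid, DQ_DEMS_LEGACY);
 *
 *	DOORBELL(cdev, db_off, db_data);
 */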

#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))

#define for_each_hwfn(cdev, i)	for (i = 0; i < cdev->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def)	\
	(val == (cond1) ? true1 :			\
	 (val == (cond2) ? true2 : def))

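/* Iteration sketch (illustrative): walking all HW-functions of a device
 * with the for_each_hwfn() helper above. 'cdev' is a struct qed_dev pointer.
 *
 *	int i;
 *
 *	for_each_hwfn(cdev, i) {
 *		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 *		...
 *	}
 */
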
/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_mcp_info;

struct qed_rt_data {
	u32	*init_val;
	bool	*b_valid;
};

enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

struct qed_tunn_update_params {
	unsigned long	tunn_mode_update_mask;
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_rx_pf_clss;
	u8		update_tx_pf_clss;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous with the protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_DEFAULT /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

enum QED_RESOURCES {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_ILT,
	QED_MAX_RESC,
};

enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_MAX_FEATURES,
};

enum QED_PORT_MODE {
	QED_PORT_MODE_DE_2X40G,
	QED_PORT_MODE_DE_2X50G,
	QED_PORT_MODE_DE_1X100G,
	QED_PORT_MODE_DE_4X10G_F,
	QED_PORT_MODE_DE_4X10G_E,
	QED_PORT_MODE_DE_4X20G,
	QED_PORT_MODE_DE_1X40G,
	QED_PORT_MODE_DE_2X25G,
	QED_PORT_MODE_DE_1X25G
};

enum qed_dev_cap {
	QED_DEV_CAP_ETH,
	QED_DEV_CAP_ISCSI,
	QED_DEV_CAP_ROCE,
};

struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality	personality;

	/* Resource Allocation scheme results */
	u32				resc_start[QED_MAX_RESC];
	u32				resc_num[QED_MAX_RESC];
	u32				feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
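
/* Example (illustrative): querying how many vports the resource-allocation
 * scheme assigned to this PF. 'p_hwfn' is a struct qed_hwfn pointer.
 *
 *	u8 num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
 */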

	u8				num_tc;
	u8				offload_tc;
	u8				non_offload_tc;

	u32				concrete_fid;
	u16				opaque_fid;
	u16				ovlan;
	u32				part_num[4];

	unsigned char			hw_mac_addr[ETH_ALEN];

	struct qed_igu_info		*p_igu_info;

	u32				port_mode;
	u32				hw_mode;
	unsigned long			device_capabilities;
};

struct qed_hw_cid_data {
	u32	cid;
	bool	b_cid_allocated;

	/* Additional identifiers */
	u16	opaque_fid;
	u8	vport_id;
};

/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE	0x2000

struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex	mutex;

	u8		channel;

	dma_addr_t	completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32		*p_completion_word;

	dma_addr_t	intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32		*p_intermediate_buffer;

	dma_addr_t	dmae_cmd_phys_addr;
	struct dmae_cmd	*p_dmae_cmd;
};

struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32	min_speed;
	bool	configured;
};

struct qed_qm_info {
	struct init_qm_pq_params	*qm_pq_params;
	struct init_qm_vport_params	*qm_vport_params;
	struct init_qm_port_params	*qm_port_params;
	u16				start_pq;
	u8				start_vport;
	u8				pure_lb_pq;
	u8				offload_pq;
	u8				pure_ack_pq;
	u8				vf_queues_offset;
	u16				num_pqs;
	u16				num_vf_pqs;
	u8				num_vports;
	u8				max_phys_tcs_per_port;
	bool				pf_rl_en;
	bool				pf_wfq_en;
	bool				vport_rl_en;
	bool				vport_wfq_en;
	u8				pf_wfq;
	u32				pf_rl;
	struct qed_wfq_data		*wfq_data;
};

struct storm_stats {
	u32 address;
	u32 len;
};

struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

struct qed_fw_data {
	struct fw_ver_info	*fw_ver_info;
	const u8		*modes_tree_buf;
	union init_op		*init_ops;
	const u32		*arr_data;
	u32			init_ops_size;
};

struct qed_simd_fp_handler {
	void	*token;
	void	(*func)(void *);
};

struct qed_hwfn {
	struct qed_dev			*cdev;
	u8				my_id;		/* ID inside the PF */
#define IS_LEAD_HWFN(edev)		(!((edev)->my_id))
	u8				rel_pf_id;	/* Relative to engine */
	u8				abs_pf_id;
#define QED_PATH_ID(_p_hwfn)		((_p_hwfn)->abs_pf_id & 1)
	u8				port_id;
	bool				b_active;

	u32				dp_module;
	u8				dp_level;
	char				name[NAME_SIZE];

	bool				first_on_engine;
	bool				hw_init_done;

	u8				num_funcs_on_engine;

	/* BAR access */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PTT pool */
	struct qed_ptt_pool		*p_ptt_pool;

	/* HW info */
	struct qed_hw_info		hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data		rt_data;

	/* SPQ */
	struct qed_spq			*p_spq;

	/* EQ */
	struct qed_eq			*p_eq;

	/* Consolidate Q */
	struct qed_consq		*p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct		*sp_dpc;
	bool				b_sp_dpc_enabled;

	struct qed_ptt			*p_main_ptt;
	struct qed_ptt			*p_dpc_ptt;

	struct qed_sb_sp_info		*p_sp_sb;
	struct qed_sb_attn_info		*p_sb_attn;

	/* Protocol related */
	struct qed_pf_params		pf_params;

	/* Array of sb_info of all status blocks */
	struct qed_sb_info		*sbs_info[MAX_SB_PER_PF_MIMD];
	u16				num_sbs;

	struct qed_cxt_mngr		*p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool				b_int_enabled;
	bool				b_int_requested;

	/* True if the driver requested the link */
	bool				b_drv_link_init;

	struct qed_vf_iov		*vf_iov_info;
	struct qed_pf_iov		*pf_iov_info;
	struct qed_mcp_info		*mcp_info;

	struct qed_dcbx_info		*p_dcbx_info;

	struct qed_hw_cid_data		*p_tx_cids;
	struct qed_hw_cid_data		*p_rx_cids;

	struct qed_dmae_info		dmae_info;

	/* QM init */
	struct qed_qm_info		qm_info;
	struct qed_storm_stats		storm_stats;

	/* Buffer for unzipping firmware data */
	void				*unzip_buf;

	struct qed_simd_fp_handler	simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct		*iov_wq;
	struct delayed_work		iov_task;
	unsigned long			iov_task_flags;
#endif

	struct z_stream_s		*stream;
};


struct pci_params {
	int		pm_cap;

	unsigned long	mem_start;
	unsigned long	mem_end;
	unsigned int	irq;
	u8		pf_num;
};

struct qed_int_param {
	u32	int_mode;
	u8	num_vectors;
	u8	min_msix_cnt; /* for minimal functionality */
};

struct qed_int_params {
	struct qed_int_param	in;
	struct qed_int_param	out;
	struct msix_entry	*msix_table;
	bool			fp_initialized;
	u8			fp_msix_base;
	u8			fp_msix_cnt;
};

struct qed_dev {
	u32				dp_module;
	u8				dp_level;
	char				name[NAME_SIZE];

	u8				type;
#define QED_DEV_TYPE_BB		(0 << 0)
#define QED_DEV_TYPE_AH		BIT(0)
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)		((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev)	(QED_IS_BB(dev) && \
				 CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && \
				 CHIP_REV_IS_B0(dev))

#define QED_GET_TYPE(dev)	(QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

	u16				vendor_id;
	u16				device_id;

	u16				chip_num;
#define CHIP_NUM_MASK		0xffff
#define CHIP_NUM_SHIFT		16

	u16				chip_rev;
#define CHIP_REV_MASK		0xf
#define CHIP_REV_SHIFT		12
#define CHIP_REV_IS_A0(_cdev)	(!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)

	u16				chip_metal;
#define CHIP_METAL_MASK		0xff
#define CHIP_METAL_SHIFT	4

	u16				chip_bond_id;
#define CHIP_BOND_ID_MASK	0xf
#define CHIP_BOND_ID_SHIFT	0

	u8				num_engines;
	u8				num_ports_in_engines;
	u8				num_funcs_in_port;

	u8				path_id;
	enum qed_mf_mode		mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int				pcie_width;
	int				pcie_speed;
	u8				ver_str[VER_SIZE];

	/* Add MF related configuration */
	u8				mcp_rev;
	u8				boot_mode;

	u8				wol;

	u32				int_mode;
	enum qed_coalescing_mode	int_coalescing_mode;
	u8				rx_coalesce_usecs;
	u8				tx_coalesce_usecs;

	/* Start BAR offset of first hwfn */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PCI */
	u8				cache_shift;

	/* Init */
	const struct iro		*iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

	/* HW functions */
	u8				num_hwfns;
	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct qed_hw_sriov_info	*p_iov_info;
#define IS_QED_SRIOV(cdev)		(!!(cdev)->p_iov_info)

	unsigned long			tunn_mode;

	bool				b_is_vf;
	u32				drv_type;

	struct qed_eth_stats		*reset_stats;
	struct qed_fw_data		*fw_data;

	u32				mcp_nvm_resp;

	/* Linux specific here */
	struct qede_dev			*edev;
	struct pci_dev			*pdev;
	int				msg_enable;

	struct pci_params		pci_params;

	struct qed_int_params		int_params;

	u8				protocol;
#define IS_QED_ETH_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_ETH)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops	*common;
		struct qed_eth_cb_ops		*eth;
	} protocol_ops;
	void				*ops_cookie;

	const struct firmware		*firmware;
};

#define NUM_OF_VFS(dev)		MAX_NUM_VFS_BB
#define NUM_OF_L2_QUEUES(dev)	MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev)		MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev)	MAX_NUM_PFS_BB

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return u8
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);

	return pfid;
}

#define PURE_LB_TC 8

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev)	(&dev->hwfns[0])

/* Other Linux specific common definitions */
#define DP_NAME(cdev)		((cdev)->name)

#define REG_ADDR(cdev, offset)	(void __iomem *)((u8 __iomem *)\
						 (cdev->regview) + \
						 (offset))

#define REG_RD(cdev, offset)		readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)	writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)	writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val)			 \
	writel((u32)val, (void __iomem *)((u8 __iomem *)\
					  (cdev->doorbells) + (db_addr)))
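
/* Access sketch (illustrative): REG_RD()/REG_WR() wrap readl()/writel() on
 * offsets into the register BAR, while DOORBELL() writes to the doorbell BAR
 * at an offset such as the one produced by qed_db_addr(). 'reg_offset',
 * 'cid' and 'db_data' are hypothetical placeholders.
 *
 *	u32 val = REG_RD(cdev, reg_offset);
 *
 *	REG_WR(cdev, reg_offset, val);
 *	DOORBELL(cdev, qed_db_addr(cid, DQ_DEMS_LEGACY), db_data);
 */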

/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);

int qed_slowpath_irq_req(struct qed_hwfn *hwfn);

#endif /* _QED_H */