/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.10.9.20"

#define MAX_HWFNS_PER_DEVICE	(4)
#define NAME_SIZE		16
#define VER_SIZE		16

#define QED_WFQ_UNIT	100

#define QED_WID_SIZE		(1024)
#define QED_PF_DEMS_SIZE	(4)

/* cau states */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;

/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      (cid * QED_PF_DEMS_SIZE);

	return db_addr;
}

static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}

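/* Usage sketch for the doorbell-address helpers above (illustrative only,
 * not part of the driver): a PF path would typically compute the legacy
 * doorbell offset for a connection and publish a value through the
 * DOORBELL() macro defined later in this file.  'cid' and 'prod_val' are
 * hypothetical locals, and DQ_DEMS_LEGACY is assumed to be the DEMS
 * selector provided by the HSI headers.
 *
 *	u32 db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY);
 *
 *	DOORBELL(p_hwfn->cdev, db_addr, prod_val);
 */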
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))

#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	(val == (cond1) ? true1 :		      \
	 (val == (cond2) ? true2 : def))

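/* Iteration sketch for the for_each_hwfn() macro above (illustrative only):
 * the body runs once per hw-function of the device, with 'i' as the index.
 * 'do_per_hwfn_work' below is a hypothetical helper, not a driver symbol.
 *
 *	int i;
 *
 *	for_each_hwfn(cdev, i) {
 *		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 *
 *		do_per_hwfn_work(p_hwfn);
 *	}
 */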
/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;

struct qed_rt_data {
	u32	*init_val;
	bool	*b_valid;
};

enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

struct qed_tunn_update_params {
	unsigned long	tunn_mode_update_mask;
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_rx_pf_clss;
	u8		update_tx_pf_clss;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_DEFAULT /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

/* HW / FW resources, output of the features supported below.
 * Most of this information is received from the MFW.
 */
enum qed_resources {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_RDMA_CNQ_RAM,
	QED_ILT,
	QED_LL2_QUEUE,
	QED_CMDQS_CQS,
	QED_RDMA_STATS_QUEUE,
	QED_MAX_RESC,
};

enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_RDMA_CNQ,
	QED_VF_L2_QUE,
	QED_MAX_FEATURES,
};

enum QED_PORT_MODE {
	QED_PORT_MODE_DE_2X40G,
	QED_PORT_MODE_DE_2X50G,
	QED_PORT_MODE_DE_1X100G,
	QED_PORT_MODE_DE_4X10G_F,
	QED_PORT_MODE_DE_4X10G_E,
	QED_PORT_MODE_DE_4X20G,
	QED_PORT_MODE_DE_1X40G,
	QED_PORT_MODE_DE_2X25G,
	QED_PORT_MODE_DE_1X25G
};

enum qed_dev_cap {
	QED_DEV_CAP_ETH,
	QED_DEV_CAP_ISCSI,
	QED_DEV_CAP_ROCE,
};

enum qed_wol_support {
	QED_WOL_SUPPORT_NONE,
	QED_WOL_SUPPORT_PME,
};

struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality personality;

	/* Resource Allocation scheme results */
	u32 resc_start[QED_MAX_RESC];
	u32 resc_num[QED_MAX_RESC];
	u32 feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
				 RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

	u8 num_tc;
	u8 offload_tc;
	u8 non_offload_tc;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 ovlan;
	u32 part_num[4];

	unsigned char hw_mac_addr[ETH_ALEN];

	struct qed_igu_info *p_igu_info;

	u32 port_mode;
	u32 hw_mode;
	unsigned long device_capabilities;
	u16 mtu;

	enum qed_wol_support b_wol_support;
};

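/* Access sketch for the RESC_*() / FEAT_NUM() accessors defined inside
 * struct qed_hw_info (illustrative only): querying how many L2 queues were
 * allocated to this PF and where its status-block range starts.
 *
 *	u32 num_l2_queues = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
 *	u32 igu_sb_start = RESC_START(p_hwfn, QED_SB);
 *	u32 igu_sb_count = RESC_NUM(p_hwfn, QED_SB);
 */
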
struct qed_hw_cid_data {
	u32	cid;
	bool	b_cid_allocated;

	/* Additional identifiers */
	u16	opaque_fid;
	u8	vport_id;
};

/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000

struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex mutex;

	u8 channel;

	dma_addr_t completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32 *p_completion_word;

	dma_addr_t intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32 *p_intermediate_buffer;

	dma_addr_t dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};

struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32 min_speed;
	bool configured;
};

struct qed_qm_info {
	struct init_qm_pq_params *qm_pq_params;
	struct init_qm_vport_params *qm_vport_params;
	struct init_qm_port_params *qm_port_params;
	u16 start_pq;
	u8 start_vport;
	u8 pure_lb_pq;
	u8 offload_pq;
	u8 pure_ack_pq;
	u8 ooo_pq;
	u8 vf_queues_offset;
	u16 num_pqs;
	u16 num_vf_pqs;
	u8 num_vports;
	u8 max_phys_tcs_per_port;
	bool pf_rl_en;
	bool pf_wfq_en;
	bool vport_rl_en;
	bool vport_wfq_en;
	u8 pf_wfq;
	u32 pf_rl;
	struct qed_wfq_data *wfq_data;
	u8 num_pf_rls;
};

struct storm_stats {
	u32 address;
	u32 len;
};

struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

struct qed_fw_data {
	struct fw_ver_info *fw_ver_info;
	const u8 *modes_tree_buf;
	union init_op *init_ops;
	const u32 *arr_data;
	u32 init_ops_size;
};

struct qed_simd_fp_handler {
	void *token;
	void (*func)(void *);
};

struct qed_hwfn {
	struct qed_dev *cdev;
	u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
	u8 rel_pf_id; /* Relative to engine */
	u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
	u8 port_id;
	bool b_active;

	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];

	bool first_on_engine;
	bool hw_init_done;

	u8 num_funcs_on_engine;
	u8 enabled_func_idx;

	/* BAR access */
	void __iomem *regview;
	void __iomem *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PTT pool */
	struct qed_ptt_pool *p_ptt_pool;

	/* HW info */
	struct qed_hw_info hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data rt_data;

	/* SPQ */
	struct qed_spq *p_spq;

	/* EQ */
	struct qed_eq *p_eq;

	/* Consolidate Q */
	struct qed_consq *p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct *sp_dpc;
	bool b_sp_dpc_enabled;

	struct qed_ptt *p_main_ptt;
	struct qed_ptt *p_dpc_ptt;

	struct qed_sb_sp_info *p_sp_sb;
	struct qed_sb_attn_info *p_sb_attn;

	/* Protocol related */
	bool using_ll2;
	struct qed_ll2_info *p_ll2_info;
	struct qed_rdma_info *p_rdma_info;
	struct qed_pf_params pf_params;

	bool b_rdma_enabled_in_prs;
	u32 rdma_prs_search_reg;

	/* Array of sb_info of all status blocks */
	struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
	u16 num_sbs;

	struct qed_cxt_mngr *p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool b_int_enabled;
	bool b_int_requested;

	/* True if the driver has requested the link */
	bool b_drv_link_init;

	struct qed_vf_iov *vf_iov_info;
	struct qed_pf_iov *pf_iov_info;
	struct qed_mcp_info *mcp_info;

	struct qed_dcbx_info *p_dcbx_info;

	struct qed_hw_cid_data *p_tx_cids;
	struct qed_hw_cid_data *p_rx_cids;

	struct qed_dmae_info dmae_info;

	/* QM init */
	struct qed_qm_info qm_info;
	struct qed_storm_stats storm_stats;

	/* Buffer for unzipping firmware data */
	void *unzip_buf;

	struct dbg_tools_data dbg_info;

	/* PWM region specific data */
	u32 dpi_size;
	u32 dpi_count;

	/* This is used to calculate the doorbell address */
	u32 dpi_start_offset;

	/* If one of the following is set then EDPM shouldn't be used */
	u8 dcbx_no_edpm;
	u8 db_bar_no_edpm;

	struct qed_simd_fp_handler simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif

	struct z_stream_s *stream;
	struct qed_roce_ll2_info *ll2;
};

struct pci_params {
	int pm_cap;

	unsigned long mem_start;
	unsigned long mem_end;
	unsigned int irq;
	u8 pf_num;
};

struct qed_int_param {
	u32 int_mode;
	u8 num_vectors;
	u8 min_msix_cnt; /* for minimal functionality */
};

struct qed_int_params {
	struct qed_int_param in;
	struct qed_int_param out;
	struct msix_entry *msix_table;
	bool fp_initialized;
	u8 fp_msix_base;
	u8 fp_msix_cnt;
	u8 rdma_msix_base;
	u8 rdma_msix_cnt;
};

struct qed_dbg_feature {
	struct dentry *dentry;
	u8 *dump_buf;
	u32 buf_size;
	u32 dumped_dwords;
};

struct qed_dbg_params {
	struct qed_dbg_feature features[DBG_FEATURE_NUM];
	u8 engine_for_debug;
	bool print_data;
};

struct qed_dev {
	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];

	u8 type;
#define QED_DEV_TYPE_BB	(0 << 0)
#define QED_DEV_TYPE_AH	BIT(0)
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)	((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev)	(QED_IS_BB(dev) && \
				 CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && \
				 CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)	((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)	QED_IS_AH(dev)

#define QED_GET_TYPE(dev)	(QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

	u16 vendor_id;
	u16 device_id;

	u16 chip_num;
#define CHIP_NUM_MASK	0xffff
#define CHIP_NUM_SHIFT	16

	u16 chip_rev;
#define CHIP_REV_MASK	0xf
#define CHIP_REV_SHIFT	12
#define CHIP_REV_IS_A0(_cdev)	(!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)

	u16 chip_metal;
#define CHIP_METAL_MASK	0xff
#define CHIP_METAL_SHIFT	4

	u16 chip_bond_id;
#define CHIP_BOND_ID_MASK	0xf
#define CHIP_BOND_ID_SHIFT	0

	u8 num_engines;
	u8 num_ports_in_engines;
	u8 num_funcs_in_port;

	u8 path_id;
	enum qed_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int pcie_width;
	int pcie_speed;
	u8 ver_str[VER_SIZE];

	/* MF related configuration */
	u8 mcp_rev;
	u8 boot_mode;

	/* WoL related configurations */
	u8 wol_config;
	u8 wol_mac[ETH_ALEN];

	u32 int_mode;
	enum qed_coalescing_mode int_coalescing_mode;
	u16 rx_coalesce_usecs;
	u16 tx_coalesce_usecs;

	/* Start Bar offset of first hwfn */
	void __iomem *regview;
	void __iomem *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PCI */
	u8 cache_shift;

	/* Init */
	const struct iro *iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

	/* HW functions */
	u8 num_hwfns;
	struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)

	unsigned long tunn_mode;

	bool b_is_vf;
	u32 drv_type;
	struct qed_eth_stats *reset_stats;
	struct qed_fw_data *fw_data;

	u32 mcp_nvm_resp;

	/* Linux specific here */
	struct qede_dev *edev;
	struct pci_dev *pdev;
	int msg_enable;

	struct pci_params pci_params;

	struct qed_int_params int_params;

	u8 protocol;
#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops *common;
		struct qed_eth_cb_ops *eth;
	} protocol_ops;
	void *ops_cookie;

	struct qed_dbg_params dbg_params;

#ifdef CONFIG_QED_LL2
	struct qed_cb_ll2_info *ll2;
	u8 ll2_mac_address[ETH_ALEN];
#endif

	const struct firmware *firmware;

	u32 rdma_max_sge;
	u32 rdma_max_inline;
	u32 rdma_max_srq_sge;
};

#define NUM_OF_VFS(dev)		MAX_NUM_VFS_BB
#define NUM_OF_L2_QUEUES(dev)	MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev)		MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev)	MAX_NUM_PFS_BB

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return inline u8
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
	u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
	u8 vf_valid = GET_FIELD(concrete_fid,
				PXP_CONCRETE_FID_VFVALID);
	u8 sw_fid;

	if (vf_valid)
		sw_fid = vfid + MAX_NUM_PFS;
	else
		sw_fid = pfid;

	return sw_fid;
}

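/* Usage sketch for qed_concrete_to_sw_fid() (illustrative only): translating
 * a concrete FID carried by a completion/event into the software function
 * index, e.g. to pick a per-function handler.  'cqe_fid' is a hypothetical
 * local.
 *
 *	u8 sw_fid = qed_concrete_to_sw_fid(cdev, cqe_fid);
 *
 * PFs map to 0..MAX_NUM_PFS - 1 and VFs to MAX_NUM_PFS onwards, as
 * implemented above.
 */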
#define PURE_LB_TC 8
#define OOO_LB_TC 9

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])

/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
						(cdev->regview) + \
							 (offset))

#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val)			 \
	writel((u32)val, (void __iomem *)((u8 __iomem *)\
					  (cdev->doorbells) + (db_addr)))

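/* Register-access sketch (illustrative only): the REG_*() macros take an
 * offset relative to the mapped register BAR (cdev->regview).  The offset
 * name below is hypothetical, not a real register definition.
 *
 *	u32 val = REG_RD(cdev, HYPOTHETICAL_REG_OFFSET);
 *
 *	REG_WR(cdev, HYPOTHETICAL_REG_OFFSET, val | BIT(0));
 *
 * DOORBELL() works the same way against the doorbell BAR (cdev->doorbells),
 * taking an offset such as the one returned by qed_db_addr().
 */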
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);

#endif /* _QED_H */