Yuval Mintz | e712d52 | 2015-10-26 11:02:27 +0200 | [diff] [blame] | 1 | /* QLogic qede NIC Driver |
Mintz, Yuval | e8f1cb5 | 2017-01-01 13:57:00 +0200 | [diff] [blame] | 2 | * Copyright (c) 2015-2017 QLogic Corporation |
| 3 | * |
| 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the |
| 8 | * OpenIB.org BSD license below: |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or |
| 11 | * without modification, are permitted provided that the following |
| 12 | * conditions are met: |
| 13 | * |
| 14 | * - Redistributions of source code must retain the above |
| 15 | * copyright notice, this list of conditions and the following |
| 16 | * disclaimer. |
| 17 | * |
| 18 | * - Redistributions in binary form must reproduce the above |
| 19 | * copyright notice, this list of conditions and the following |
 * disclaimer in the documentation and/or other materials
| 21 | * provided with the distribution. |
| 22 | * |
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 30 | * SOFTWARE. |
| 31 | */ |
Yuval Mintz | e712d52 | 2015-10-26 11:02:27 +0200 | [diff] [blame] | 32 | #ifndef _QEDE_H_ |
| 33 | #define _QEDE_H_ |
| 34 | #include <linux/compiler.h> |
| 35 | #include <linux/version.h> |
| 36 | #include <linux/workqueue.h> |
| 37 | #include <linux/netdevice.h> |
| 38 | #include <linux/interrupt.h> |
| 39 | #include <linux/bitmap.h> |
| 40 | #include <linux/kernel.h> |
| 41 | #include <linux/mutex.h> |
Mintz, Yuval | 496e051 | 2016-11-29 16:47:09 +0200 | [diff] [blame] | 42 | #include <linux/bpf.h> |
Yuval Mintz | e712d52 | 2015-10-26 11:02:27 +0200 | [diff] [blame] | 43 | #include <linux/io.h> |
| 44 | #include <linux/qed/common_hsi.h> |
| 45 | #include <linux/qed/eth_common.h> |
| 46 | #include <linux/qed/qed_if.h> |
| 47 | #include <linux/qed/qed_chain.h> |
| 48 | #include <linux/qed/qed_eth_if.h> |
| 49 | |
/* Driver version string is built as Major.Minor.Revision.Engineering */
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 9
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
		__stringify(QEDE_MINOR_VERSION) "." \
		__stringify(QEDE_REVISION_VERSION) "." \
		__stringify(QEDE_ENGINEERING_VERSION)

/* Module symbol name */
#define DRV_MODULE_SYM qede
| 60 | |
/* Software mirror of the device statistics; refreshed on demand from
 * the qed core (see qede_fill_by_demand_stats()). All counters are
 * free-running u64 values.
 */
struct qede_stats {
	/* Rx counters */
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;

	/* Tx counters */
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;

	/* Coalescing [aggregation] counters */
	u64 coalesced_pkts;
	u64 coalesced_events;
	u64 coalesced_aborts_num;
	u64 non_coalesced_pkts;
	u64 coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames; /* NOTE(review): "crtl" typo is part of ABI-visible stat name */
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
	u64 brb_truncates;
	u64 brb_discards;
	u64 tx_mac_ctrl_frames;
};
| 126 | |
/* Tracks a single VLAN id requested via the 8021q ndo callbacks;
 * linked on qede_dev->vlan_list.
 */
struct qede_vlan {
	struct list_head list;
	u16 vid;
	bool configured;	/* true once the filter is actually configured */
};
| 132 | |
/* RoCE/RDMA bookkeeping shared with the qedr driver. */
struct qede_rdma_dev {
	struct qedr_dev *qedr_dev;	/* handle into the qedr (RoCE) driver */
	struct list_head entry;
	struct list_head roce_event_list;	/* pending RoCE events */
	struct workqueue_struct *roce_wq;	/* workqueue that drains roce_event_list */
};
| 139 | |
/* Per-interface private data [netdev private area]. Ties together the
 * qed core device, the PCI function and all fastpath/slowpath state.
 */
struct qede_dev {
	struct qed_dev *cdev;		/* handle into the qed core module */
	struct net_device *ndev;
	struct pci_dev *pdev;

	u32 dp_module;			/* debug-print module bitmask */
	u8 dp_level;			/* debug-print verbosity level */

	u32 flags;
#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))

	const struct qed_eth_ops *ops;	/* L2 ops exported by the qed module */

	struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)

	struct qede_fastpath *fp_array;
	u8 req_num_tx;			/* requested number of Tx-only queues */
	u8 fp_num_tx;			/* current number of Tx-only queues */
	u8 req_num_rx;			/* requested number of Rx-only queues */
	u8 fp_num_rx;			/* current number of Rx-only queues */
	u16 req_queues;
	u16 num_queues;
#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)

	struct qed_int_info int_info;
	unsigned char primary_mac[ETH_ALEN];

	/* Smaller private variant of the RTNL lock */
	struct mutex qede_lock;
	u32 state;		/* Protected by qede_lock; enum QEDE_STATE */
	u16 rx_buf_size;
	u32 rx_copybreak;	/* small-Rx-packet copy threshold - presumably
				 * set via ethtool; confirm against qede_ethtool
				 */

	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
	/* Max supported alignment is 256 (8 shift)
	 * minimal alignment shift 6 is optimal for 57xxx HW performance
	 */
#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
	 * at the end of skb->data, to avoid wasting a full cache line.
	 * This reduces memory use (skb->truesize).
	 */
#define QEDE_FW_RX_ALIGN_END \
	max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

	struct qede_stats stats;	/* software statistics mirror */
#define QEDE_RSS_INDIR_INITED BIT(0)
#define QEDE_RSS_KEY_INITED BIT(1)
#define QEDE_RSS_CAPS_INITED BIT(2)
	u32 rss_params_inited; /* bit-field to track initialized rss params */
	struct qed_update_vport_rss_params rss_params;
	u16 q_num_rx_buffers; /* Must be a power of two */
	u16 q_num_tx_buffers; /* Must be a power of two */

	bool gro_disable;
	struct list_head vlan_list;	/* list of struct qede_vlan */
	u16 configured_vlans;
	u16 non_configured_vlans;
	bool accept_any_vlan;
	struct delayed_work sp_task;	/* slowpath task; see QEDE_SP_* flags */
	unsigned long sp_flags;
	u16 vxlan_dst_port;	/* configured VXLAN UDP destination port */
	u16 geneve_dst_port;	/* configured GENEVE UDP destination port */

	bool wol_enabled;	/* Wake-on-LAN currently enabled */

	struct qede_rdma_dev rdma_info;

	struct bpf_prog *xdp_prog;	/* attached XDP program, if any */
};
| 217 | |
/* Driver state, kept in qede_dev->state (protected by qede_lock) */
enum QEDE_STATE {
	QEDE_STATE_CLOSED,
	QEDE_STATE_OPEN,
};

/* Combine two 32-bit halves into one 64-bit value */
#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))

#define MAX_NUM_TC	8	/* max traffic classes */
#define MAX_NUM_PRI	8	/* max priorities */
| 227 | |
/* The driver supports the new build_skb() API:
 * RX ring buffer contains pointer to kmalloc() data only,
 * skb are built only after the frame was DMA-ed.
 * NOTE(review): buffers here are page-based (struct page), not raw
 * kmalloc() data - the comment above predates that change.
 */
struct sw_rx_data {
	struct page *data;		/* page backing the Rx buffer */
	dma_addr_t mapping;		/* DMA address of @data */
	unsigned int page_offset;	/* offset of current buffer within @data */
};
| 237 | |
/* State of a single TPA [HW aggregation] context */
enum qede_agg_state {
	QEDE_AGG_STATE_NONE = 0,	/* no aggregation in progress */
	QEDE_AGG_STATE_START = 1,	/* TPA_START seen; aggregation open */
	QEDE_AGG_STATE_ERROR = 2	/* error encountered mid-aggregation */
};
| 243 | |
/* Per-aggregation TPA context; an array of these lives on each Rx queue
 * (see qede_rx_queue->tpa_info).
 */
struct qede_agg_info {
	/* rx_buf is a data buffer that can be placed / consumed from rx bd
	 * chain. It has two purposes: We will preallocate the data buffer
	 * for each aggregation when we open the interface and will place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data buffer;
	dma_addr_t buffer_mapping;	/* DMA mapping of @buffer */

	struct sk_buff *skb;		/* skb being aggregated into */

	/* We need some structs from the start cookie until termination */
	u16 vlan_tag;
	u16 start_cqe_bd_len;
	u8 start_cqe_placement_offset;

	u8 state;			/* enum qede_agg_state */
	u8 frag_id;

	u8 tunnel_type;
};
| 270 | |
/* A single Rx queue: the HW BD and completion chains plus the software
 * shadow ring tracking posted buffers.
 */
struct qede_rx_queue {
	__le16 *hw_cons_ptr;		/* consumer index maintained by HW */
	void __iomem *hw_rxq_prod_addr;	/* producer update address */

	/* Required for the allocation of replacement buffers */
	struct device *dev;

	struct bpf_prog *xdp_prog;	/* XDP program for this queue, if any */

	u16 sw_rx_cons;			/* software shadow-ring consumer */
	u16 sw_rx_prod;			/* software shadow-ring producer */

	u16 num_rx_buffers;		/* Slowpath */
	u8 data_direction;		/* DMA direction for buffer mappings */
	u8 rxq_id;

	u32 rx_buf_size;
	u32 rx_buf_seg_size;

	u64 rcv_pkts;			/* total packets received */

	struct sw_rx_data *sw_rx_ring;	/* shadow of posted Rx buffers */
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring ____cacheline_aligned;

	/* GRO */
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

	u64 rx_hw_errors;
	u64 rx_alloc_errors;
	u64 rx_ip_frags;

	u64 xdp_no_pass;		/* frames not passed up due to XDP verdict */

	void *handle;
};
| 307 | |
/* Tx doorbell data - written either as structured eth_db_data
 * fields or as a single raw 32-bit store.
 */
union db_prod {
	struct eth_db_data data;
	u32 raw;
};
| 312 | |
/* Software metadata kept per Tx BD, needed to release the packet
 * once transmission completes.
 */
struct sw_tx_bd {
	struct sk_buff *skb;
	u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD		BIT(0)
};
| 319 | |
/* A single Tx queue - used both for regular skb transmission and for
 * XDP_TX forwarding (distinguished by @is_xdp).
 */
struct qede_tx_queue {
	u8 is_xdp;		/* queue transmits XDP frames, not skbs */
	bool is_legacy;
	u16 sw_tx_cons;		/* software ring consumer */
	u16 sw_tx_prod;		/* software ring producer */
	u16 num_tx_buffers;	/* Slowpath only */

	u64 xmit_pkts;		/* total packets transmitted */
	u64 stopped_cnt;	/* times the queue was stopped */

	__le16 *hw_cons_ptr;	/* consumer index maintained by HW */

	/* Needed for the mapping of packets */
	struct device *dev;

	void __iomem *doorbell_addr;
	union db_prod tx_db;
	int index;		/* Slowpath only */
/* Translate between a queue's global index and its XDP-queue index */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
					 QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))

	/* Regular Tx requires skb + metadata for release purpose,
	 * while XDP requires only the pages themselves.
	 */
	union {
		struct sw_tx_bd *skbs;
		struct page **pages;
	} sw_tx_ring;

	struct qed_chain tx_pbl;

	/* Slowpath; Should be kept in end [unless missing padding] */
	void *handle;
};
| 355 | |
/* Extract the 64-bit DMA address stored in a BD (little-endian on wire) */
#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr.hi), \
						 le32_to_cpu((bd)->addr.lo))
/* Store a DMA address + length into a BD */
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
	do { \
		(bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
		(bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
		(bd)->nbytes = cpu_to_le16(len); \
	} while (0)
/* Extract the length stored in a BD */
#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
| 365 | |
/* Per-interrupt fastpath context: one NAPI instance bound to a status
 * block, plus the Rx/Tx/XDP queues serviced from it.
 */
struct qede_fastpath {
	struct qede_dev *edev;		/* back-pointer to owning device */
#define QEDE_FASTPATH_TX	BIT(0)
#define QEDE_FASTPATH_RX	BIT(1)
#define QEDE_FASTPATH_XDP	BIT(2)
#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
	u8 type;			/* QEDE_FASTPATH_* bitmask */
	u8 id;
	u8 xdp_xmit;			/* set when XDP queued frames for Tx */
	struct napi_struct napi;
	struct qed_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txq;
	struct qede_tx_queue *xdp_tx;	/* Tx queue dedicated to XDP_TX */

#define VEC_NAME_SIZE	(sizeof(((struct net_device *)0)->name) + 8)
	char	name[VEC_NAME_SIZE];	/* IRQ vector name */
};
| 384 | |
/* Debug print definitions */
#define DP_NAME(edev) ((edev)->ndev->name)

/* Flags describing the Tx offloads required for a given skb */
#define XMIT_PLAIN		0
#define XMIT_L4_CSUM		BIT(0)
#define XMIT_LSO		BIT(1)
#define XMIT_ENC		BIT(2)
#define XMIT_ENC_GSO_L4_CSUM	BIT(3)

/* Rx checksum-validation results */
#define QEDE_CSUM_ERROR			BIT(0)
#define QEDE_CSUM_UNNECESSARY		BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)

/* Slowpath task flags (bits in qede_dev->sp_flags) */
#define QEDE_SP_RX_MODE			1
#define QEDE_SP_VXLAN_PORT_CONFIG	2
#define QEDE_SP_GENEVE_PORT_CONFIG	3
Sudarsana Kalluru | 0d8e0aa | 2015-10-26 11:02:30 +0200 | [diff] [blame] | 401 | |
/* Arguments for qede_reload(): @func is invoked during the reload
 * sequence with one of the union members as its payload.
 */
struct qede_reload_args {
	void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
	union {
		netdev_features_t features;	/* for feature changes */
		struct bpf_prog *new_prog;	/* for XDP program attach */
		u16 mtu;			/* for MTU changes */
	} u;
};
| 410 | |
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features);
void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq);
int qede_free_tx_pkt(struct qede_dev *edev,
		     struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);

/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
int qede_set_mac_addr(struct net_device *ndev, void *p);

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);

int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update);

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);

int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);

#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif

/* Slowpath / configuration function definitions */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
Sudarsana Kalluru | 133fac0 | 2015-10-26 11:02:34 +0200 | [diff] [blame] | 459 | |
/* Rx ring sizing; ring size is a power of two, so the usable BD count
 * is one less than the ring size.
 */
#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN		128
#define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)

/* Tx ring sizing - same power-of-two scheme as Rx */
#define TX_RING_SIZE_POW	13
#define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN		128
#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX

#define QEDE_MIN_PKT_LEN		64
#define QEDE_RX_HDR_SIZE		256
#define QEDE_MAX_JUMBO_PACKET_SIZE	9600
/* Iterate over all configured queues of the device */
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
Yuval Mintz | 2950219 | 2015-10-26 11:02:29 +0200 | [diff] [blame] | 476 | |
Yuval Mintz | e712d52 | 2015-10-26 11:02:27 +0200 | [diff] [blame] | 477 | #endif /* _QEDE_H_ */ |