/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

#define QEDE_MAJOR_VERSION		8
#define QEDE_MINOR_VERSION		10
#define QEDE_REVISION_VERSION		10
#define QEDE_ENGINEERING_VERSION	21
#define DRV_MODULE_VERSION	__stringify(QEDE_MAJOR_VERSION) "."	\
				__stringify(QEDE_MINOR_VERSION) "."	\
				__stringify(QEDE_REVISION_VERSION) "."	\
				__stringify(QEDE_ENGINEERING_VERSION)

#define DRV_MODULE_SYM qede

struct qede_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 coalesced_pkts;
	u64 coalesced_events;
	u64 coalesced_aborts_num;
	u64 non_coalesced_pkts;
	u64 coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 tx_mac_ctrl_frames;
};

struct qede_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct qede_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

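/* Device statistics: the common block applies to every adapter family, while
 * the union holds the counters that exist only on BB or only on AH devices
 * (see QEDE_IS_BB()/QEDE_IS_AH() below).
 */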
struct qede_stats {
	struct qede_stats_common common;

	union {
		struct qede_stats_bb bb;
		struct qede_stats_ah ah;
	};
};

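/* One entry on the per-device vlan_list; 'configured' tracks whether the VID
 * has actually been programmed into the device (see
 * qede_configure_vlan_filters() and qede_vlan_mark_nonconfigured() below).
 */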
struct qede_vlan {
	struct list_head list;
	u16 vid;
	bool configured;
};

struct qede_rdma_dev {
	struct qedr_dev *qedr_dev;
	struct list_head entry;
	struct list_head rdma_event_list;
	struct workqueue_struct *rdma_wq;
};

struct qede_ptp;

#define QEDE_RFS_MAX_FLTR	256

struct qede_dev {
	struct qed_dev *cdev;
	struct net_device *ndev;
	struct pci_dev *pdev;

	u32 dp_module;
	u8 dp_level;

	unsigned long flags;
#define QEDE_FLAG_IS_VF			BIT(0)
#define IS_VF(edev)			(!!((edev)->flags & QEDE_FLAG_IS_VF))
#define QEDE_TX_TIMESTAMPING_EN		BIT(1)
#define QEDE_FLAGS_PTP_TX_IN_PRORGESS	BIT(2)

	const struct qed_eth_ops *ops;
	struct qede_ptp *ptp;

	struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)

	struct qede_fastpath *fp_array;
	u8 req_num_tx;
	u8 fp_num_tx;
	u8 req_num_rx;
	u8 fp_num_rx;
	u16 req_queues;
	u16 num_queues;
#define QEDE_QUEUE_CNT(edev)	((edev)->num_queues)
#define QEDE_RSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_RX_QUEUE_IDX(edev, i)	(i)
#define QEDE_TSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_rx)

	struct qed_int_info int_info;

	/* Smaller private variant of the RTNL lock */
	struct mutex qede_lock;
	u32 state; /* Protected by qede_lock */
	u16 rx_buf_size;
	u32 rx_copybreak;

	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD			(ETH_HLEN + 8 + 8)
	/* Max supported alignment is 256 (8 shift); the minimal alignment
	 * shift of 6 is optimal for 57xxx HW performance.
	 */
#define QEDE_RX_ALIGN_SHIFT		max(6, min(8, L1_CACHE_SHIFT))
	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
	 * at the end of skb->data, to avoid wasting a full cache line.
	 * This reduces memory use (skb->truesize).
	 */
#define QEDE_FW_RX_ALIGN_END					\
	max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,			\
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
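	/* Worked example (assuming a 64-byte cache line, i.e. L1_CACHE_SHIFT
	 * of 6): QEDE_RX_ALIGN_SHIFT evaluates to 6 and QEDE_FW_RX_ALIGN_END
	 * becomes max(64, SKB_DATA_ALIGN(sizeof(struct skb_shared_info))),
	 * i.e. the end of each Rx buffer keeps enough room for build_skb()
	 * to append the shared info.
	 */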

	struct qede_stats stats;
#define QEDE_RSS_INDIR_INITED	BIT(0)
#define QEDE_RSS_KEY_INITED	BIT(1)
#define QEDE_RSS_CAPS_INITED	BIT(2)
	u32 rss_params_inited; /* bit-field to track initialized rss params */
	u16 rss_ind_table[128];
	u32 rss_key[10];
	u8 rss_caps;

	u16 q_num_rx_buffers; /* Must be a power of two */
	u16 q_num_tx_buffers; /* Must be a power of two */

	bool gro_disable;
	struct list_head vlan_list;
	u16 configured_vlans;
	u16 non_configured_vlans;
	bool accept_any_vlan;
	struct delayed_work sp_task;
	unsigned long sp_flags;
	u16 vxlan_dst_port;
	u16 geneve_dst_port;

	struct qede_arfs *arfs;
	bool wol_enabled;

	struct qede_rdma_dev rdma_info;

	struct bpf_prog *xdp_prog;
};

enum QEDE_STATE {
	QEDE_STATE_CLOSED,
	QEDE_STATE_OPEN,
};

#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))

#define MAX_NUM_TC	8
#define MAX_NUM_PRI	8

/* The driver supports the new build_skb() API:
 * the RX ring buffer holds only the data buffer and its mapping,
 * and skbs are built only after the frame was DMA-ed.
 */
struct sw_rx_data {
	struct page *data;
	dma_addr_t mapping;
	unsigned int page_offset;
};

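/* Lifecycle of a TPA (HW GRO) aggregation as tracked in qede_agg_info.state:
 * NONE until a TPA_START completion arrives, START while the FW keeps adding
 * segments, and ERROR when something went wrong so the aggregation is
 * dropped once it terminates.
 */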
enum qede_agg_state {
	QEDE_AGG_STATE_NONE  = 0,
	QEDE_AGG_STATE_START = 1,
	QEDE_AGG_STATE_ERROR = 2
};

struct qede_agg_info {
	/* rx_buf is a data buffer that can be placed on / consumed from the
	 * rx bd chain. It has two purposes: we preallocate the data buffer
	 * for each aggregation when we open the interface and place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain, since FW may still be writing to
	 * it (the header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data buffer;
	dma_addr_t buffer_mapping;

	struct sk_buff *skb;

	/* Fields from the TPA_START CQE that are needed until the
	 * aggregation terminates.
	 */
	u16 vlan_tag;
	u16 start_cqe_bd_len;
	u8 start_cqe_placement_offset;

	u8 state;
	u8 frag_id;

	u8 tunnel_type;
};

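/* Rx queue fastpath state. hw_cons_ptr reflects how far the device has
 * progressed on the completion ring (qede_has_rx_work() compares it against
 * the driver's completion-ring consumer), while hw_rxq_prod_addr is where the
 * driver publishes new buffer producers (see qede_update_rx_prod()).
 */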
struct qede_rx_queue {
	__le16 *hw_cons_ptr;
	void __iomem *hw_rxq_prod_addr;

	/* Required for the allocation of replacement buffers */
	struct device *dev;

	struct bpf_prog *xdp_prog;

	u16 sw_rx_cons;
	u16 sw_rx_prod;

	u16 filled_buffers;
	u8 data_direction;
	u8 rxq_id;

	/* Used once per NAPI run */
	u16 num_rx_buffers;

	u16 rx_headroom;

	u32 rx_buf_size;
	u32 rx_buf_seg_size;

	struct sw_rx_data *sw_rx_ring;
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring ____cacheline_aligned;

	/* GRO */
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

	/* Used once per NAPI run */
	u64 rcv_pkts;

	u64 rx_hw_errors;
	u64 rx_alloc_errors;
	u64 rx_ip_frags;

	u64 xdp_no_pass;

	void *handle;
};

union db_prod {
	struct eth_db_data data;
	u32 raw;
};

struct sw_tx_bd {
	struct sk_buff *skb;
	u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD		BIT(0)
};

struct sw_tx_xdp {
	struct page *page;
	dma_addr_t mapping;
};

struct qede_tx_queue {
	u8 is_xdp;
	bool is_legacy;
	u16 sw_tx_cons;
	u16 sw_tx_prod;
	u16 num_tx_buffers; /* Slowpath only */

	u64 xmit_pkts;
	u64 stopped_cnt;

	__le16 *hw_cons_ptr;

	/* Needed for the mapping of packets */
	struct device *dev;

	void __iomem *doorbell_addr;
	union db_prod tx_db;
	int index; /* Slowpath only */
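	/* XDP Tx queues are indexed after the regular Tx queues, so these
	 * helpers translate between a queue's absolute index and its
	 * XDP-local one.
	 */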
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
					 QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))

	/* Regular Tx requires an skb + metadata for release purposes,
	 * while XDP requires the pages and the mapped address.
	 */
	union {
		struct sw_tx_bd *skbs;
		struct sw_tx_xdp *xdp;
	} sw_tx_ring;

	struct qed_chain tx_pbl;

	/* Slowpath; should be kept at the end [unless missing padding] */
	void *handle;
};

#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr.hi), \
						 le32_to_cpu((bd)->addr.lo))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)				\
	do {								\
		(bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));	\
		(bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));	\
		(bd)->nbytes = cpu_to_le16(len);			\
	} while (0)
#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
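/* Illustrative use of the BD helpers above (not a prescribed sequence):
 * the Tx path records a freshly mapped buffer with
 * BD_SET_UNMAP_ADDR_LEN(bd, mapping, len), and on completion
 * BD_UNMAP_ADDR(bd) / BD_UNMAP_LEN(bd) are read back to undo the DMA
 * mapping.
 */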
412
413struct qede_fastpath {
414 struct qede_dev *edev;
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -0400415#define QEDE_FASTPATH_TX BIT(0)
416#define QEDE_FASTPATH_RX BIT(1)
Mintz, Yuval496e0512016-11-29 16:47:09 +0200417#define QEDE_FASTPATH_XDP BIT(2)
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -0400418#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
419 u8 type;
420 u8 id;
Mintz, Yuvalcb6aeb02016-11-29 16:47:10 +0200421 u8 xdp_xmit;
Yuval Mintz29502192015-10-26 11:02:29 +0200422 struct napi_struct napi;
423 struct qed_sb_info *sb_info;
424 struct qede_rx_queue *rxq;
Mintz, Yuval80439a12016-11-29 16:47:02 +0200425 struct qede_tx_queue *txq;
Mintz, Yuvalcb6aeb02016-11-29 16:47:10 +0200426 struct qede_tx_queue *xdp_tx;
Yuval Mintz29502192015-10-26 11:02:29 +0200427
428#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
429 char name[VEC_NAME_SIZE];
Yuval Mintze712d522015-10-26 11:02:27 +0200430};
431
432/* Debug print definitions */
433#define DP_NAME(edev) ((edev)->ndev->name)
434
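/* Classification flags for an outgoing packet; the Tx path combines them to
 * decide on checksum offload, LSO and tunnel-encapsulation handling.
 */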
#define XMIT_PLAIN		0
#define XMIT_L4_CSUM		BIT(0)
#define XMIT_LSO		BIT(1)
#define XMIT_ENC		BIT(2)
#define XMIT_ENC_GSO_L4_CSUM	BIT(3)

#define QEDE_CSUM_ERROR			BIT(0)
#define QEDE_CSUM_UNNECESSARY		BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)

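/* Slow-path flags: bit numbers in qede_dev.sp_flags, serviced by the sp_task
 * delayed work.
 */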
#define QEDE_SP_RX_MODE		1

#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id);
#define QEDE_SP_ARFS_CONFIG	4
#define QEDE_SP_TASK_POLL_DELAY	(5 * HZ)
#endif

void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs);
int qede_get_arfs_filter_count(struct qede_dev *edev);

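/* Arguments for qede_reload() (declared below): the device is brought down,
 * func() is called to apply the requested change (new MTU, feature set or
 * XDP program), and the device is then brought back up.
 */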
struct qede_reload_args {
	void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
	union {
		netdev_features_t features;
		struct bpf_prog *new_prog;
		u16 mtu;
	} u;
};

/* Datapath function definitions */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features);
void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
		     struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);

/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);

int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update);

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);

int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);

#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif

void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);

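/* Ring sizing: the usable BD count is defined as one less than the ring
 * size (NUM_*_BDS_MAX), e.g. the Rx default of BIT(10) - 1 = 1023 buffers
 * against a maximum of 8191.
 */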
#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN		128
#define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)

#define TX_RING_SIZE_POW	13
#define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN		128
#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX

#define QEDE_MIN_PKT_LEN		64
#define QEDE_RX_HDR_SIZE		256
#define QEDE_MAX_JUMBO_PACKET_SIZE	9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)

#endif /* _QEDE_H_ */