/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>

#include "vf.h"

#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#define BP_EXTENDED_STATS
#endif

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	BIT(IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
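
/* Worked example (illustrative, not from the original source): with
 * IXGBE_MAX_DATA_PER_TXD = 16384, a single 64KB fragment costs
 * TXD_USE_COUNT(65536) = DIV_ROUND_UP(65536, 16384) = 4 data descriptors.
 * DESC_NEEDED budgets one descriptor per fragment plus 4 slack slots
 * (roughly: context descriptor, skb head, and rounding) so the Tx path can
 * reserve a safe worst case before mapping an skb; the exact accounting
 * lives in the Tx hot path.
 */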

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct ixgbevf_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbevf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ixgbevf_stats {
	u64 packets;
	u64 bytes;
#ifdef BP_EXTENDED_STATS
	u64 yields;
	u64 misses;
	u64 cleaned;
#endif
};

struct ixgbevf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbevf_rx_queue_stats {
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

enum ixgbevf_ring_state_t {
	__IXGBEVF_TX_DETECT_HANG,
	__IXGBEVF_HANG_CHECK_ARMED,
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
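
/* Usage sketch (illustrative, based on how the sibling ixgbe driver arms
 * its watchdog; the real logic lives in ixgbevf_main.c): the service task
 * periodically calls set_check_for_tx_hang() on each Tx ring; if the ring
 * then makes no completion progress before the next pass,
 * check_for_tx_hang() evaluates true and the driver logs the hang and
 * schedules a reset.
 */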

struct ixgbevf_ring {
	struct ixgbevf_ring *next;
	struct net_device *netdev;
	struct device *dev;
	void *desc;	/* descriptor ring memory */
	dma_addr_t dma;	/* phys. address of descriptor ring */
	unsigned int size;	/* length in bytes */
	u16 count;	/* number of descriptors */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	union {
		struct ixgbevf_tx_buffer *tx_buffer_info;
		struct ixgbevf_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	struct ixgbevf_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbevf_tx_queue_stats tx_stats;
		struct ixgbevf_rx_queue_stats rx_stats;
	};

	u64 hw_csum_rx_error;
	u8 __iomem *tail;
	struct sk_buff *skb;

	u16 reg_idx; /* holds the special value that gets the hardware register
		      * offset associated with this ring, which is different
		      * for DCB and RSS modes
		      */
	int queue_index; /* needed for multiqueue queue management */
};

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define MAX_RX_QUEUES	IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES	IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES	2

#define IXGBEVF_DEFAULT_TXD	1024
#define IXGBEVF_DEFAULT_RXD	512
#define IXGBEVF_MAX_TXD		4096
#define IXGBEVF_MIN_TXD		64
#define IXGBEVF_MAX_RXD		4096
#define IXGBEVF_MIN_RXD		64

/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256	256	/* Used for packet split */
#define IXGBEVF_RXBUFFER_2048	2048

#define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256
#define IXGBEVF_RX_BUFSZ	IXGBEVF_RXBUFFER_2048

#define MAXIMUM_ETHERNET_VLAN_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

struct ixgbevf_ring_container {
	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbevf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
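
/* Example (illustrative, not part of the original header): summing the
 * byte counts of every Tx ring attached to a q_vector:
 *
 *	struct ixgbevf_ring *ring;
 *	u64 bytes = 0;
 *
 *	ixgbevf_for_each_ring(ring, q_vector->tx)
 *		bytes += ring->stats.bytes;
 */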

/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbevf_q_vector {
	struct ixgbevf_adapter *adapter;
	u16 v_idx;	/* index of q_vector within array, also used for
			 * finding the bit in EICR and friends that
			 * represents the vector for this ring
			 */
	u16 itr;	/* Interrupt throttle rate written to EITR */
	struct napi_struct napi;
	struct ixgbevf_ring_container rx, tx;
	char name[IFNAMSIZ + 9];
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define IXGBEVF_QV_STATE_IDLE		0
#define IXGBEVF_QV_STATE_NAPI		1 /* NAPI owns this QV */
#define IXGBEVF_QV_STATE_POLL		2 /* poll owns this QV */
#define IXGBEVF_QV_STATE_DISABLED	4 /* QV is disabled */
#define IXGBEVF_QV_OWNED	(IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
#define IXGBEVF_QV_LOCKED	(IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
#define IXGBEVF_QV_STATE_NAPI_YIELD	8 /* NAPI yielded this QV */
#define IXGBEVF_QV_STATE_POLL_YIELD	16 /* poll yielded this QV */
#define IXGBEVF_QV_YIELD	(IXGBEVF_QV_STATE_NAPI_YIELD | \
				 IXGBEVF_QV_STATE_POLL_YIELD)
#define IXGBEVF_QV_USER_PEND	(IXGBEVF_QV_STATE_POLL | \
				 IXGBEVF_QV_STATE_POLL_YIELD)
	spinlock_t lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
{
	spin_lock_init(&q_vector->lock);
	q_vector->state = IXGBEVF_QV_STATE_IDLE;
}

/* called from the device poll routine to get ownership of a q_vector */
static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
{
	bool rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_LOCKED) {
		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
		rc = false;
#ifdef BP_EXTENDED_STATS
		q_vector->tx.ring->stats.yields++;
#endif
	} else {
		/* we don't care if someone yielded */
		q_vector->state = IXGBEVF_QV_STATE_NAPI;
	}
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* returns true if someone tried to get the qv while napi had it */
static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
{
	bool rc = false;

	spin_lock_bh(&q_vector->lock);
	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
				   IXGBEVF_QV_STATE_NAPI_YIELD));

	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
		rc = true;
	/* reset state to idle, unless QV is disabled */
	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* called from ixgbevf_low_latency_poll() */
static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
{
	bool rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_LOCKED) {
		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
		rc = false;
#ifdef BP_EXTENDED_STATS
		q_vector->rx.ring->stats.yields++;
#endif
	} else {
		/* preserve yield marks */
		q_vector->state |= IXGBEVF_QV_STATE_POLL;
	}
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* returns true if someone tried to get the qv while it was locked */
static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
{
	bool rc = false;

	spin_lock_bh(&q_vector->lock);
	WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);

	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
		rc = true;
	/* reset state to idle, unless QV is disabled */
	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* true if a socket is polling, even if it did not get the lock */
static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
{
	WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
	return q_vector->state & IXGBEVF_QV_USER_PEND;
}

/* false if QV is currently owned */
static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
{
	bool rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_OWNED)
		rc = false;
	q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */
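
/* Locking protocol sketch (illustrative; the real callers live in
 * ixgbevf_main.c): NAPI polling and socket busy-polling race for each
 * q_vector, and the loser records a yield bit so the winner knows work
 * was missed. A NAPI poll routine built on these helpers would begin:
 *
 *	if (!ixgbevf_qv_lock_napi(q_vector))
 *		return budget;	(busy poll owns the rings, try again later)
 *	... clean Tx/Rx rings ...
 *	ixgbevf_qv_unlock_napi(q_vector);
 */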

/* microsecond values for the various ITR rates, shifted left by 2 so they
 * fit the EITR register, whose low 3 bits are reserved as zero
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_10K_ITR		400
#define IXGBE_8K_ITR		500

/* Helper macros to switch between ints/sec and what the register uses.
 * And yes, it's the same math going both ways. The lowest value
 * supported by all of the ixgbe hardware is 8.
 */
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
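
/* Worked example (illustrative): a target of 20000 ints/s gives
 * EITR_INTS_PER_SEC_TO_REG(20000) = 1000000000 / (20000 * 256) = 195;
 * converting 195 back gives 1000000000 / (195 * 256) = ~20032 ints/s,
 * which is why a single definition serves both directions.
 */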

/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					  const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
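
/* Example (illustrative, assuming the IXGBE_RXD_STAT_EOP bit from the VF
 * descriptor defines): testing whether a write-back descriptor closes a
 * frame:
 *
 *	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
 *		... the frame is complete ...
 */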

static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
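
/* The "- 1" keeps one slot permanently empty so that next_to_use ==
 * next_to_clean unambiguously means "ring empty". Illustrative use in a
 * transmit path (caller names hypothetical):
 *
 *	if (ixgbevf_desc_unused(tx_ring) < DESC_NEEDED)
 *		netif_stop_subqueue(netdev, tx_ring->queue_index);
 */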

static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
{
	writel(value, ring->tail);
}
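
/* Posting descriptors ends with a tail bump that tells hardware how far
 * software has filled the ring, e.g. (illustrative):
 *
 *	ixgbevf_write_tail(rx_ring, rx_ring->next_to_use);
 */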

#define IXGBEVF_RX_DESC(R, i)	\
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_DESC(R, i)	\
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_CTXTDESC(R, i)	\
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
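
/* Example (illustrative): indexing the raw descriptor ring memory:
 *
 *	union ixgbe_adv_rx_desc *rx_desc =
 *		IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
 */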

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */

#define OTHER_VECTOR	1
#define NON_Q_VECTORS	(OTHER_VECTOR)

#define MAX_MSIX_Q_VECTORS	2

#define MIN_MSIX_Q_VECTORS	1
#define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* board specific private data structure */
struct ixgbevf_adapter {
	/* this field must be first, see ixgbevf_process_skb_fields */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];

	/* Interrupt Throttle Rate */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	/* interrupt masks */
	u32 eims_enable_mask;
	u32 eims_other;

	/* TX */
	int num_tx_queues;
	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
	u64 restart_queue;
	u32 tx_timeout_count;

	/* RX */
	int num_rx_queues;
	struct ixgbevf_ring *rx_ring[MAX_RX_QUEUES]; /* One per active queue */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	int num_msix_vectors;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBEVF_FLAG_RESET_REQUESTED		(u32)(1)
#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED	(u32)(1 << 2)

	struct msix_entry *msix_entries;

	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* structs defined in ixgbe_vf.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	/* Interrupt Throttle Rate */
	u32 eitr_param;

	struct ixgbevf_hw_stats stats;

	unsigned long state;
	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int rx_ring_count;

#ifdef BP_EXTENDED_STATS
	u64 bp_rx_yields;
	u64 bp_rx_cleaned;
	u64 bp_rx_missed;

	u64 bp_tx_yields;
	u64 bp_tx_cleaned;
	u64 bp_tx_missed;
#endif

	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 link_speed;
	bool link_up;

	struct timer_list service_timer;
	struct work_struct service_task;

	spinlock_t mbx_lock;
	unsigned long last_reset;
};

enum ixgbevf_state_t {
	__IXGBEVF_TESTING,
	__IXGBEVF_RESETTING,
	__IXGBEVF_DOWN,
	__IXGBEVF_DISABLED,
	__IXGBEVF_REMOVING,
	__IXGBEVF_SERVICE_SCHED,
	__IXGBEVF_SERVICE_INITED,
};

enum ixgbevf_boards {
	board_82599_vf,
	board_X540_vf,
	board_X550_vf,
	board_X550EM_x_vf,
};

extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;

/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];

void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *ring);
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *ring);
void ixgbevf_free_rx_resources(struct ixgbevf_ring *ring);
void ixgbevf_free_tx_resources(struct ixgbevf_ring *ring);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);

void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);

void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);

#ifdef DEBUG
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
#define hw_dbg(hw, format, arg...) do {} while (0)
#endif

#endif /* _IXGBEVF_H_ */