/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.11.3-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf] = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

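/*
 * VFRDT is the Rx Descriptor Tail register: writing index 'val' hands
 * every descriptor before that index to hardware, while the entry at
 * 'val' itself stays owned by software. E.g. after filling entries 0-9
 * of a ring, the caller passes val = 10 so the device may DMA into
 * buffers 0-9.
 */
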
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

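/*
 * Each VTIVAR register maps two queues: the 8-bit field at bit offset
 * 16 * (queue & 1) + 8 * direction holds the vector for one queue/cause.
 * E.g. mapping Rx queue 1 to vector 2 writes bits 23:16 of VTIVAR(0),
 * Tx queue 1 uses bits 31:24 of the same register, and queues 2 and 3
 * live in VTIVAR(1).
 */
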
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

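/*
 * Worked example of the worst-case math above: one descriptor carries at
 * most IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes, so a 32 KB chunk
 * costs TXD_USE_COUNT(32768) = 2 descriptors. DESC_NEEDED reserves one
 * descriptor per possible fragment plus four spares of headroom; with a
 * typical MAX_SKB_FRAGS of 17 that is 21 descriptors per worst-case skb.
 */
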
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	eop_desc = tx_buffer_info->next_to_watch;

	do {
		bool cleaned = false;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer_info->next_to_watch = NULL;

		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
		}

		eop_desc = tx_buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}

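/*
 * The byte count above approximates on-wire bytes for TSO: each of the
 * 'segs' segments leaves the NIC with its own copy of the headers, so
 * (segs - 1) * skb_headlen() is added to skb->len (assuming the linear
 * area holds just the headers, as it does for a typical TSO skb). E.g.
 * gso_segs = 10 with 66-byte headers and skb->len = 14666 counts as
 * 10 packets and 14666 + 9 * 66 = 15260 bytes.
 */
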
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	ring->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(adapter->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

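/*
 * Budget split example: with the default NAPI budget of 64 and two Rx
 * rings on this vector, each ring may clean at most 32 packets per poll.
 * A ring that uses its whole share is assumed to have work left, so
 * clean_complete goes false and the full budget is returned to keep the
 * vector in polling mode.
 */
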
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

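/*
 * The thresholds above are in bytes per usec, which is roughly MB/s
 * (1 byte/usec == 10^6 bytes/s). q_vector->itr holds the EITR encoding,
 * where itr >> 2 gives the interval in usec: e.g. at IXGBE_20K_ITR (200)
 * the timeslice is 50 usec, so 1500 bytes received in one interval gives
 * bytes_perint = 30 and moves a low_latency ring to bulk_latency.
 */
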
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

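/*
 * The smoothing above steps the value only part way toward the target:
 * new = 10 * target * old / (9 * target + old). With the usual encodings
 * (IXGBE_8K_ITR == 500, IXGBE_20K_ITR == 200), moving from 8K toward 20K
 * gives 10 * 200 * 500 / (9 * 200 + 500) = 1000000 / 2300 = 434, so the
 * interrupt rate ramps over several polls instead of jumping at once.
 */
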
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

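/*
 * Distribution example for the shared-vector path: with 3 Rx queues and
 * 2 q_vectors, vector 0 takes DIV_ROUND_UP(3, 2) = 2 queues (rx0, rx1)
 * and vector 1 takes DIV_ROUND_UP(1, 1) = 1 queue (rx2); recomputing the
 * count each iteration spreads any remainder across the earliest
 * vectors. Tx queues are distributed the same way.
 */
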
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them. Set the number of msix vectors to zero
	 * indicating that not enough can be allocated. The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail. The only way to recover is to unload the driver and
	 * reload it again. If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

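/*
 * Setting a vector's bit in VTEIAM and VTEIAC should make the hardware
 * auto-mask the vector and auto-clear its cause bit when the interrupt
 * fires, which is why ixgbevf_msix_clean_rings() only needs to schedule
 * NAPI; the vector is re-armed through VTEIMS from ixgbevf_poll() via
 * ixgbevf_irq_enable_queues() once cleaning finishes.
 */
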
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

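/*
 * The BSIZEPKT field of SRRCTL counts Rx buffer size in 1 KB units
 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is a shift of 10), so the expression
 * above programs, e.g., 2 for a 2048-byte rx_buf_len and 4 for a
 * 4096-byte one; the ALIGN() rounds sub-1 KB remainders up to the next
 * whole unit first.
 */
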
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}

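/*
 * Sizing example: with the default 1500-byte MTU, max_frame is
 * 1500 + ETH_HLEN + ETH_FCS_LEN = 1518, or 1522 after the VLAN_HLEN
 * allowance. On an X540 VF that matches MAXIMUM_ETHERNET_VLAN_SIZE
 * exactly, so 1522-byte buffers are used; an 82599 VF falls through to
 * the 2K bucket. A 9000-byte MTU ends up in IXGBEVF_RXBUFFER_10K.
 */
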
Greg Rose92915f72010-01-09 02:24:10 +00001149/**
1150 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1151 * @adapter: board private structure
1152 *
1153 * Configure the Rx unit of the MAC after a reset.
1154 **/
1155static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1156{
1157 u64 rdba;
1158 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001159 int i, j;
1160 u32 rdlen;
Greg Rose92915f72010-01-09 02:24:10 +00001161
Don Skidmore1bb9c632013-09-21 01:57:33 +00001162 ixgbevf_setup_psrtype(adapter);
Alexander Duyckdd1fe112012-07-20 08:09:48 +00001163
1164 /* set_rx_buffer_len must be called before ring initialization */
1165 ixgbevf_set_rx_buffer_len(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001166
1167 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1168 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1169 * the Base and Length of the Rx Descriptor Ring */
1170 for (i = 0; i < adapter->num_rx_queues; i++) {
1171 rdba = adapter->rx_ring[i].dma;
1172 j = adapter->rx_ring[i].reg_idx;
1173 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1174 (rdba & DMA_BIT_MASK(32)));
1175 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1176 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1177 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1178 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1179 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1180 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
Greg Rose92915f72010-01-09 02:24:10 +00001181
1182 ixgbevf_configure_srrctl(adapter, j);
1183 }
1184}
1185
Patrick McHardy80d5c362013-04-19 02:04:28 +00001186static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1187 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001188{
1189 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1190 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001191 int err;
1192
John Fastabend55fdd45b2012-10-01 14:52:20 +00001193 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001194
Greg Rose92915f72010-01-09 02:24:10 +00001195 /* add VID to filter table */
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001196 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001197
John Fastabend55fdd45b2012-10-01 14:52:20 +00001198 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001199
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001200 /* translate error return types so error makes sense */
1201 if (err == IXGBE_ERR_MBX)
1202 return -EIO;
1203
1204 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1205 return -EACCES;
1206
Jiri Pirkodadcd652011-07-21 03:25:09 +00001207 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001208
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001209 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001210}
1211
Patrick McHardy80d5c362013-04-19 02:04:28 +00001212static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1213 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001214{
1215 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1216 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001217 int err = -EOPNOTSUPP;
Greg Rose92915f72010-01-09 02:24:10 +00001218
John Fastabend55fdd45b2012-10-01 14:52:20 +00001219 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001220
Greg Rose92915f72010-01-09 02:24:10 +00001221 /* remove VID from filter table */
Greg Rose92fe0bf2012-11-02 05:50:47 +00001222 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001223
John Fastabend55fdd45b2012-10-01 14:52:20 +00001224 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001225
Jiri Pirkodadcd652011-07-21 03:25:09 +00001226 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001227
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001228 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001229}
1230
1231static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1232{
Jiri Pirkodadcd652011-07-21 03:25:09 +00001233 u16 vid;
Greg Rose92915f72010-01-09 02:24:10 +00001234
Jiri Pirkodadcd652011-07-21 03:25:09 +00001235 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00001236 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1237 htons(ETH_P_8021Q), vid);
Greg Rose92915f72010-01-09 02:24:10 +00001238}
1239
Greg Rose46ec20f2011-05-13 01:33:42 +00001240static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1241{
1242 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1243 struct ixgbe_hw *hw = &adapter->hw;
1244 int count = 0;
1245
1246 if ((netdev_uc_count(netdev)) > 10) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001247 pr_err("Too many unicast filters - No Space\n");
Greg Rose46ec20f2011-05-13 01:33:42 +00001248 return -ENOSPC;
1249 }
1250
1251 if (!netdev_uc_empty(netdev)) {
1252 struct netdev_hw_addr *ha;
1253 netdev_for_each_uc_addr(ha, netdev) {
1254 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1255 udelay(200);
1256 }
1257 } else {
1258 /*
1259 * If the list is empty then send message to PF driver to
1260 * clear all macvlans on this VF.
1261 */
1262 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1263 }
1264
1265 return count;
1266}
1267
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];

		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

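/*
 * The ordering inside ixgbevf_configure() is deliberate: address and
 * VLAN filters are replayed over the mailbox first, the Tx/Rx rings are
 * programmed next, and receive buffers are posted last, so no
 * descriptor reaches hardware before its queue is fully set up.
 */
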
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	int j = adapter->rx_ring[rxr].reg_idx;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
		       rxr);

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

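/*
 * RXDCTL.ENABLE does not necessarily take effect on the register write
 * that requests it, so both the enable path above and the disable path
 * below poll the bit back (up to IXGBEVF_MAX_RX_DESC_POLL reads) before
 * trusting the queue state.  Once the queue is live, writing count - 1
 * to the tail via ixgbevf_release_rx_desc() hands every posted buffer
 * to hardware in one shot.
 */
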
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

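/*
 * Statistics model: the hardware counters reset whenever the VF or PF
 * resets, so the driver keeps three values per counter - a "base"
 * snapshot taken here at init, a "last" raw reading used for rollover
 * detection, and a "saved_reset" accumulator that preserves traffic
 * counted before the most recent reset.
 */
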
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

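/*
 * Two details above are easy to miss.  WTHRESH lives at bit 16 of
 * TXDCTL, so "txdctl |= (8 << 16)" asks for Tx descriptor write-back in
 * bursts of eight rather than one at a time.  On X540 VFs the RLPML
 * field caps the largest receivable packet; programming it to
 * MTU + ETH_HLEN + ETH_FCS_LEN keeps oversized frames out of buffers
 * sized for the configured MTU.
 */
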
static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;

		/* allocate resources on the ring */
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
			}
			kfree(rx_ring);
			return err;
		}
	}

	/* free the existing rings and queues */
	ixgbevf_free_all_rx_resources(adapter);
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	/* reset ring to vector mapping */
	ixgbevf_reset_q_vectors(adapter);
	ixgbevf_map_rings_to_vectors(adapter);

	return 0;
}

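/*
 * When the PF runs DCB, the VF is handed one traffic class per Rx queue
 * plus a remapped default Tx register index, so ixgbevf_reset_queues()
 * rebuilds the Rx ring array to match before the device comes back up.
 * The replacement rings are fully allocated before the old ones are
 * freed, so an allocation failure midway leaves the adapter usable.
 */
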
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_reset_queues(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int err = 0;
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}

	return err;
}

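/*
 * pci_enable_msix(), as it existed in this era, returns 0 on success, a
 * negative errno on hard failure, and a positive count meaning "only
 * this many vectors are available".  The loop above exploits the third
 * case by retrying with the advertised count until the request either
 * succeeds or falls below MIN_MSIX_COUNT, at which point MSI-X is
 * abandoned.
 */
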
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_hw failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
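
/*
 * The VF statistics registers are narrower than the 64-bit software
 * counters: packet counts are 32 bits wide, byte counts 36 bits (split
 * across an _LSB/_MSB register pair).  Each macro detects a hardware
 * rollover by comparing against the previous raw reading and, when the
 * new value is smaller, credits one full register span (2^32 or 2^36)
 * before splicing the raw bits back into the low part of the counter.
 */
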
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i].hw_csum_rx_error;
		adapter->hw_csum_rx_good +=
			adapter->rx_ring[i].hw_csum_rx_good;
		adapter->rx_ring[i].hw_csum_rx_error = 0;
		adapter->rx_ring[i].hw_csum_rx_good = 0;
	}
}

/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}

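/*
 * Writing the assembled bitmask to VTEICS forces an interrupt cause on
 * every vector that owns a ring, so each timer tick re-fires the queue
 * interrupts even when the hardware has gone quiet.  This stands in for
 * hang detection on the VF: a stalled ring gets revisited by its
 * handler on the next watchdog pass.
 */
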
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;

			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

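/*
 * For a VF, check_link is a mailbox transaction rather than a register
 * read; a nonzero return generally means the PF has reset underneath
 * us, which is why that path schedules the full reset_task instead of
 * merely reporting loss of link.
 */
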
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}

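/*
 * Allocation split: the per-buffer bookkeeping array only needs to be
 * virtually contiguous, hence vzalloc(), while the descriptor ring must
 * be physically contiguous DMA memory.  Rounding the ring size up to
 * 4 KB is a comfortable multiple of whatever base-address alignment the
 * hardware actually requires.
 */
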
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}

static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;
	}

	/* free the existing ring and queues */
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	return 0;
}

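/*
 * ixgbevf_setup_queues() is the ndo_open-time twin of
 * ixgbevf_reset_queues() above: the same PF query and ring reshuffle,
 * minus the Rx resource allocation and vector remapping, because
 * ixgbevf_open() performs those steps itself immediately afterwards.
 */
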
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero. The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if the adapter is still stopped then the PF isn't up and
		 * the VF can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	/* setup queue reg_idx and Rx queue count */
	err = ixgbevf_setup_queues(adapter);
	if (err)
		goto err_setup_queues;

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * Since request_irq is called later in this function, the
	 * rings must be mapped to vectors *before* up_complete runs.
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
err_setup_queues:
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

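/*
 * A context descriptor carries no packet data; it is slipped into the
 * ring ahead of the data descriptors to describe header layout
 * (vlan_macip_lens), checksum/TSO treatment (type_tucmd) and segment
 * sizing (mss_l4len_idx) for the packets that follow it.
 */
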
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

Alexander Duyck70a10e22012-05-11 08:33:21 +00002806static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
Greg Rose92915f72010-01-09 02:24:10 +00002807 struct sk_buff *skb, u32 tx_flags)
2808{
Alexander Duyck70a10e22012-05-11 08:33:21 +00002809 u32 vlan_macip_lens = 0;
2810 u32 mss_l4len_idx = 0;
2811 u32 type_tucmd = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002812
Alexander Duyck70a10e22012-05-11 08:33:21 +00002813 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2814 u8 l4_hdr = 0;
2815 switch (skb->protocol) {
2816 case __constant_htons(ETH_P_IP):
2817 vlan_macip_lens |= skb_network_header_len(skb);
2818 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2819 l4_hdr = ip_hdr(skb)->protocol;
2820 break;
2821 case __constant_htons(ETH_P_IPV6):
2822 vlan_macip_lens |= skb_network_header_len(skb);
2823 l4_hdr = ipv6_hdr(skb)->nexthdr;
2824 break;
2825 default:
2826 if (unlikely(net_ratelimit())) {
2827 dev_warn(tx_ring->dev,
2828 "partial checksum but proto=%x!\n",
2829 skb->protocol);
Greg Rose92915f72010-01-09 02:24:10 +00002830 }
Alexander Duyck70a10e22012-05-11 08:33:21 +00002831 break;
Greg Rose92915f72010-01-09 02:24:10 +00002832 }
2833
Alexander Duyck70a10e22012-05-11 08:33:21 +00002834 switch (l4_hdr) {
2835 case IPPROTO_TCP:
2836 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2837 mss_l4len_idx = tcp_hdrlen(skb) <<
2838 IXGBE_ADVTXD_L4LEN_SHIFT;
2839 break;
2840 case IPPROTO_SCTP:
2841 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2842 mss_l4len_idx = sizeof(struct sctphdr) <<
2843 IXGBE_ADVTXD_L4LEN_SHIFT;
2844 break;
2845 case IPPROTO_UDP:
2846 mss_l4len_idx = sizeof(struct udphdr) <<
2847 IXGBE_ADVTXD_L4LEN_SHIFT;
2848 break;
2849 default:
2850 if (unlikely(net_ratelimit())) {
2851 dev_warn(tx_ring->dev,
2852 "partial checksum but l4 proto=%x!\n",
2853 l4_hdr);
2854 }
2855 break;
2856 }
Greg Rose92915f72010-01-09 02:24:10 +00002857 }
2858
Alexander Duyck70a10e22012-05-11 08:33:21 +00002859 /* vlan_macip_lens: MACLEN, VLAN tag */
2860 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2861 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2862
2863 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2864 type_tucmd, mss_l4len_idx);
2865
2866 return (skb->ip_summed == CHECKSUM_PARTIAL);
Greg Rose92915f72010-01-09 02:24:10 +00002867}
2868
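/**
 * ixgbevf_tx_map - DMA-map the skb head and page fragments
 * @tx_ring: ring the packet will be transmitted on
 * @skb: buffer to map
 * @tx_flags: flags carrying VLAN/offload state
 *
 * Splits the linear data and each fragment into chunks of at most
 * IXGBE_MAX_DATA_PER_TXD bytes, one tx_buffer_info entry per chunk.
 * Returns the number of entries mapped; on a DMA mapping error the
 * partial mappings are unwound and a negative count is returned.
 **/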
Alexander Duyck70a10e22012-05-11 08:33:21 +00002869static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
Alexander Duycke757e3e2013-01-31 07:43:22 +00002870 struct sk_buff *skb, u32 tx_flags)
Greg Rose92915f72010-01-09 02:24:10 +00002871{
Greg Rose92915f72010-01-09 02:24:10 +00002872 struct ixgbevf_tx_buffer *tx_buffer_info;
2873 unsigned int len;
2874 unsigned int total = skb->len;
Kulikov Vasiliy2540ddb2010-07-15 08:45:57 +00002875 unsigned int offset = 0, size;
2876 int count = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002877 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2878 unsigned int f;
Greg Rose65deeed2010-03-24 09:35:42 +00002879 int i;
Greg Rose92915f72010-01-09 02:24:10 +00002880
2881 i = tx_ring->next_to_use;
2882
2883 len = min(skb_headlen(skb), total);
2884 while (len) {
2885 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2886 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2887
2888 tx_buffer_info->length = size;
2889 tx_buffer_info->mapped_as_page = false;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002890 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
Greg Rose92915f72010-01-09 02:24:10 +00002891 skb->data + offset,
Nick Nunley2a1f8792010-04-27 13:10:50 +00002892 size, DMA_TO_DEVICE);
Alexander Duyck70a10e22012-05-11 08:33:21 +00002893 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
Greg Rose92915f72010-01-09 02:24:10 +00002894 goto dma_error;
Greg Rose92915f72010-01-09 02:24:10 +00002895
2896 len -= size;
2897 total -= size;
2898 offset += size;
2899 count++;
2900 i++;
2901 if (i == tx_ring->count)
2902 i = 0;
2903 }
2904
2905 for (f = 0; f < nr_frags; f++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00002906 const struct skb_frag_struct *frag;
Greg Rose92915f72010-01-09 02:24:10 +00002907
2908 frag = &skb_shinfo(skb)->frags[f];
Eric Dumazet9e903e02011-10-18 21:00:24 +00002909 len = min((unsigned int)skb_frag_size(frag), total);
Ian Campbell877749b2011-08-29 23:18:26 +00002910 offset = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002911
2912 while (len) {
2913 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2914 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2915
2916 tx_buffer_info->length = size;
Ian Campbell877749b2011-08-29 23:18:26 +00002917 tx_buffer_info->dma =
Alexander Duyck70a10e22012-05-11 08:33:21 +00002918 skb_frag_dma_map(tx_ring->dev, frag,
Ian Campbell877749b2011-08-29 23:18:26 +00002919 offset, size, DMA_TO_DEVICE);
Alexander Duyck70a10e22012-05-11 08:33:21 +00002920 if (dma_mapping_error(tx_ring->dev,
2921 tx_buffer_info->dma))
Greg Rose92915f72010-01-09 02:24:10 +00002922 goto dma_error;
Greg Rose6132ee82012-09-21 00:14:14 +00002923 tx_buffer_info->mapped_as_page = true;
Greg Rose92915f72010-01-09 02:24:10 +00002924
2925 len -= size;
2926 total -= size;
2927 offset += size;
2928 count++;
2929 i++;
2930 if (i == tx_ring->count)
2931 i = 0;
2932 }
2933 if (total == 0)
2934 break;
2935 }
2936
2937 if (i == 0)
2938 i = tx_ring->count - 1;
2939 else
2940 i = i - 1;
2941 tx_ring->tx_buffer_info[i].skb = skb;
Greg Rose92915f72010-01-09 02:24:10 +00002942
2943 return count;
2944
2945dma_error:
Alexander Duyck70a10e22012-05-11 08:33:21 +00002946 dev_err(tx_ring->dev, "TX DMA map failed\n");
Greg Rose92915f72010-01-09 02:24:10 +00002947
2948 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2949 tx_buffer_info->dma = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002950 count--;
2951
2952 /* clear timestamp and dma mappings for remaining portion of packet */
2953 while (count >= 0) {
2954 count--;
2955 i--;
2956 if (i < 0)
2957 i += tx_ring->count;
2958 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00002959 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00002960 }
2961
2962 return count;
2963}
2964
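/**
 * ixgbevf_tx_queue - write the data descriptors for a mapped packet
 * @tx_ring: ring the packet will be transmitted on
 * @tx_flags: flags selecting VLAN insertion, checksum and TSO options
 * @count: number of entries produced by ixgbevf_tx_map
 * @first: index of the first descriptor used by this packet
 * @paylen: total frame length
 * @hdr_len: header length computed during TSO setup
 *
 * Fills in one advanced data descriptor per mapped buffer, records the
 * timestamp and next_to_watch descriptor for completion tracking, and
 * advances next_to_use.
 **/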
Alexander Duyck70a10e22012-05-11 08:33:21 +00002965static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
Alexander Duycke757e3e2013-01-31 07:43:22 +00002966 int count, unsigned int first, u32 paylen,
2967 u8 hdr_len)
Greg Rose92915f72010-01-09 02:24:10 +00002968{
2969 union ixgbe_adv_tx_desc *tx_desc = NULL;
2970 struct ixgbevf_tx_buffer *tx_buffer_info;
2971 u32 olinfo_status = 0, cmd_type_len = 0;
2972 unsigned int i;
2973
2974 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2975
2976 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2977
2978 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2979
2980 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2981 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2982
Alexander Duyck70a10e22012-05-11 08:33:21 +00002983 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2984 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2985
Greg Rose92915f72010-01-09 02:24:10 +00002986 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2987 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2988
Greg Rose92915f72010-01-09 02:24:10 +00002989 /* use index 1 context for tso */
2990 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2991 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
Alexander Duyck70a10e22012-05-11 08:33:21 +00002992 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002993 }
2994
2995 /*
2996 * Check Context must be set if Tx switch is enabled, which it
2997	 * always is when virtual functions are running
2998 */
2999 olinfo_status |= IXGBE_ADVTXD_CC;
Greg Rose92915f72010-01-09 02:24:10 +00003000
3001 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3002
3003 i = tx_ring->next_to_use;
3004 while (count--) {
3005 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck908421f2012-05-11 08:33:00 +00003006 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
Greg Rose92915f72010-01-09 02:24:10 +00003007 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3008 tx_desc->read.cmd_type_len =
3009 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3010 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3011 i++;
3012 if (i == tx_ring->count)
3013 i = 0;
3014 }
3015
3016 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3017
Alexander Duycke757e3e2013-01-31 07:43:22 +00003018 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3019
3020 /* Force memory writes to complete before letting h/w
3021 * know there are new descriptors to fetch. (Only
3022 * applicable for weak-ordered memory model archs,
3023 * such as IA-64).
3024 */
3025 wmb();
3026
3027 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
Greg Rose92915f72010-01-09 02:24:10 +00003028 tx_ring->next_to_use = i;
Greg Rose92915f72010-01-09 02:24:10 +00003029}
3030
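/* Slow path of ixgbevf_maybe_stop_tx(): stop the subqueue, then check
 * again under the memory barrier in case a Tx completion freed
 * descriptors while the queue was being stopped.
 */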
Alexander Duyckfb401952012-05-11 08:33:16 +00003031static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
Greg Rose92915f72010-01-09 02:24:10 +00003032{
Alexander Duyckfb401952012-05-11 08:33:16 +00003033 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
Greg Rose92915f72010-01-09 02:24:10 +00003034
Alexander Duyckfb401952012-05-11 08:33:16 +00003035 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
Greg Rose92915f72010-01-09 02:24:10 +00003036 /* Herbert's original patch had:
3037 * smp_mb__after_netif_stop_queue();
3038 * but since that doesn't exist yet, just open code it. */
3039 smp_mb();
3040
3041	/* We need to check again in case another CPU has just
3042 * made room available. */
3043 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3044 return -EBUSY;
3045
3046 /* A reprieve! - use start_queue because it doesn't call schedule */
Alexander Duyckfb401952012-05-11 08:33:16 +00003047 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
Greg Rose92915f72010-01-09 02:24:10 +00003048 ++adapter->restart_queue;
3049 return 0;
3050}
3051
Alexander Duyckfb401952012-05-11 08:33:16 +00003052static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
Greg Rose92915f72010-01-09 02:24:10 +00003053{
3054 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3055 return 0;
Alexander Duyckfb401952012-05-11 08:33:16 +00003056 return __ixgbevf_maybe_stop_tx(tx_ring, size);
Greg Rose92915f72010-01-09 02:24:10 +00003057}
3058
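/* Main transmit entry point: validate the frame, reserve enough
 * descriptors, emit any TSO/checksum context descriptor, map the
 * buffers and finally write the tail register to hand the packet
 * to hardware.
 */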
3059static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3060{
3061 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3062 struct ixgbevf_ring *tx_ring;
3063 unsigned int first;
3064 unsigned int tx_flags = 0;
3065 u8 hdr_len = 0;
3066 int r_idx = 0, tso;
Alexander Duyck35959902012-05-11 08:32:40 +00003067 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3068#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3069 unsigned short f;
3070#endif
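	/* drop frames with an inaccessible header or addressed to the
	 * IEEE 802 link-local range (01:80:C2:00:00:0x), which a VF
	 * should not be sending
	 */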
Greg Rosef9d08f162012-10-02 00:50:52 +00003071 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
Ben Hutchings46acc462012-11-01 09:11:11 +00003072 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
Greg Rosef9d08f162012-10-02 00:50:52 +00003073 dev_kfree_skb(skb);
3074 return NETDEV_TX_OK;
3075 }
Greg Rose92915f72010-01-09 02:24:10 +00003076
3077 tx_ring = &adapter->tx_ring[r_idx];
3078
Alexander Duyck35959902012-05-11 08:32:40 +00003079 /*
3080 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3081 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3082 * + 2 desc gap to keep tail from touching head,
3083 * + 1 desc for context descriptor,
3084 * otherwise try next time
3085 */
3086#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3087 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3088 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3089#else
3090 count += skb_shinfo(skb)->nr_frags;
3091#endif
Alexander Duyckfb401952012-05-11 08:33:16 +00003092 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
Alexander Duyck35959902012-05-11 08:32:40 +00003093 adapter->tx_busy++;
3094 return NETDEV_TX_BUSY;
3095 }
3096
Jesse Grosseab6d182010-10-20 13:56:03 +00003097 if (vlan_tx_tag_present(skb)) {
Greg Rose92915f72010-01-09 02:24:10 +00003098 tx_flags |= vlan_tx_tag_get(skb);
3099 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3100 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3101 }
3102
Greg Rose92915f72010-01-09 02:24:10 +00003103 first = tx_ring->next_to_use;
3104
3105 if (skb->protocol == htons(ETH_P_IP))
3106 tx_flags |= IXGBE_TX_FLAGS_IPV4;
Alexander Duyck70a10e22012-05-11 08:33:21 +00003107 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
Greg Rose92915f72010-01-09 02:24:10 +00003108 if (tso < 0) {
3109 dev_kfree_skb_any(skb);
3110 return NETDEV_TX_OK;
3111 }
3112
3113 if (tso)
Alexander Duyck70a10e22012-05-11 08:33:21 +00003114 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3115 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
Greg Rose92915f72010-01-09 02:24:10 +00003116 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3117
Alexander Duyck70a10e22012-05-11 08:33:21 +00003118 ixgbevf_tx_queue(tx_ring, tx_flags,
Alexander Duycke757e3e2013-01-31 07:43:22 +00003119 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3120 first, skb->len, hdr_len);
Alexander Duyck70a10e22012-05-11 08:33:21 +00003121
3122 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
Greg Rose92915f72010-01-09 02:24:10 +00003123
Alexander Duyckfb401952012-05-11 08:33:16 +00003124 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
Greg Rose92915f72010-01-09 02:24:10 +00003125
3126 return NETDEV_TX_OK;
3127}
3128
3129/**
Greg Rose92915f72010-01-09 02:24:10 +00003130 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3131 * @netdev: network interface device structure
3132 * @p: pointer to an address structure
3133 *
3134 * Returns 0 on success, negative on failure
3135 **/
3136static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3137{
3138 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3139 struct ixgbe_hw *hw = &adapter->hw;
3140 struct sockaddr *addr = p;
3141
3142 if (!is_valid_ether_addr(addr->sa_data))
3143 return -EADDRNOTAVAIL;
3144
3145 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3146 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3147
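	/* programming the RAR is a PF mailbox operation, serialize it */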
John Fastabend55fdd45b2012-10-01 14:52:20 +00003148 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00003149
Greg Rose92fe0bf2012-11-02 05:50:47 +00003150 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
Greg Rose92915f72010-01-09 02:24:10 +00003151
John Fastabend55fdd45b2012-10-01 14:52:20 +00003152 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00003153
Greg Rose92915f72010-01-09 02:24:10 +00003154 return 0;
3155}
3156
3157/**
3158 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3159 * @netdev: network interface device structure
3160 * @new_mtu: new value for maximum frame size
3161 *
3162 * Returns 0 on success, negative on failure
3163 **/
3164static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3165{
3166 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3167 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Greg Rose69bfbec2011-01-26 01:06:12 +00003168 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
Greg Rose69bfbec2011-01-26 01:06:12 +00003169
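	/* jumbo frames are supported with mailbox API 1.1 or on X540 VFs */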
Alexander Duyck56e94092012-07-20 08:10:03 +00003170 switch (adapter->hw.api_version) {
3171 case ixgbe_mbox_api_11:
Greg Rose69bfbec2011-01-26 01:06:12 +00003172 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
Alexander Duyck56e94092012-07-20 08:10:03 +00003173 break;
3174 default:
3175 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3176 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3177 break;
3178 }
Greg Rose92915f72010-01-09 02:24:10 +00003179
3180 /* MTU < 68 is an error and causes problems on some kernels */
Greg Rose69bfbec2011-01-26 01:06:12 +00003181 if ((new_mtu < 68) || (max_frame > max_possible_frame))
Greg Rose92915f72010-01-09 02:24:10 +00003182 return -EINVAL;
3183
3184 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3185 netdev->mtu, new_mtu);
3186 /* must set new MTU before calling down or up */
3187 netdev->mtu = new_mtu;
3188
3189 if (netif_running(netdev))
3190 ixgbevf_reinit_locked(adapter);
3191
3192 return 0;
3193}
3194
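/* Suspend/shutdown path: detach the netdev, quiesce the interface and
 * release IRQs and ring resources before the PCI core powers the
 * device down.
 */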
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003195static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
Greg Rose92915f72010-01-09 02:24:10 +00003196{
3197 struct net_device *netdev = pci_get_drvdata(pdev);
3198 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003199#ifdef CONFIG_PM
3200 int retval = 0;
3201#endif
Greg Rose92915f72010-01-09 02:24:10 +00003202
3203 netif_device_detach(netdev);
3204
3205 if (netif_running(netdev)) {
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003206 rtnl_lock();
Greg Rose92915f72010-01-09 02:24:10 +00003207 ixgbevf_down(adapter);
3208 ixgbevf_free_irq(adapter);
3209 ixgbevf_free_all_tx_resources(adapter);
3210 ixgbevf_free_all_rx_resources(adapter);
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003211 rtnl_unlock();
Greg Rose92915f72010-01-09 02:24:10 +00003212 }
3213
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003214 ixgbevf_clear_interrupt_scheme(adapter);
3215
3216#ifdef CONFIG_PM
3217 retval = pci_save_state(pdev);
3218 if (retval)
3219 return retval;
3220
3221#endif
3222 pci_disable_device(pdev);
3223
3224 return 0;
3225}
3226
3227#ifdef CONFIG_PM
3228static int ixgbevf_resume(struct pci_dev *pdev)
3229{
3230 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
3231 struct net_device *netdev = adapter->netdev;
3232 u32 err;
3233	int err;
3234 pci_set_power_state(pdev, PCI_D0);
3235 pci_restore_state(pdev);
3236 /*
3237 * pci_restore_state clears dev->state_saved so call
3238 * pci_save_state to restore it.
3239 */
Greg Rose92915f72010-01-09 02:24:10 +00003240 pci_save_state(pdev);
Greg Rose92915f72010-01-09 02:24:10 +00003241
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003242 err = pci_enable_device_mem(pdev);
3243 if (err) {
3244 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3245 return err;
3246 }
3247 pci_set_master(pdev);
3248
Don Skidmore798e3812013-10-01 04:33:51 -07003249 ixgbevf_reset(adapter);
3250
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003251 rtnl_lock();
3252 err = ixgbevf_init_interrupt_scheme(adapter);
3253 rtnl_unlock();
3254 if (err) {
3255 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3256 return err;
3257 }
3258
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003259 if (netif_running(netdev)) {
3260 err = ixgbevf_open(netdev);
3261 if (err)
3262 return err;
3263 }
3264
3265 netif_device_attach(netdev);
3266
3267 return err;
3268}
3269
3270#endif /* CONFIG_PM */
3271static void ixgbevf_shutdown(struct pci_dev *pdev)
3272{
3273 ixgbevf_suspend(pdev, PMSG_SUSPEND);
Greg Rose92915f72010-01-09 02:24:10 +00003274}
3275
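/* ndo_get_stats64 callback: fold the per-ring byte/packet counters
 * into @stats, using the u64_stats seqcount so 64-bit counters read
 * consistently on 32-bit machines.
 */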
Eric Dumazet4197aa72011-06-22 05:01:35 +00003276static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3277 struct rtnl_link_stats64 *stats)
3278{
3279 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3280 unsigned int start;
3281 u64 bytes, packets;
3282 const struct ixgbevf_ring *ring;
3283 int i;
3284
3285 ixgbevf_update_stats(adapter);
3286
3287 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3288
3289 for (i = 0; i < adapter->num_rx_queues; i++) {
3290 ring = &adapter->rx_ring[i];
3291 do {
3292 start = u64_stats_fetch_begin_bh(&ring->syncp);
3293 bytes = ring->total_bytes;
3294 packets = ring->total_packets;
3295 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3296 stats->rx_bytes += bytes;
3297 stats->rx_packets += packets;
3298 }
3299
3300 for (i = 0; i < adapter->num_tx_queues; i++) {
3301 ring = &adapter->tx_ring[i];
3302 do {
3303 start = u64_stats_fetch_begin_bh(&ring->syncp);
3304 bytes = ring->total_bytes;
3305 packets = ring->total_packets;
3306 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3307 stats->tx_bytes += bytes;
3308 stats->tx_packets += packets;
3309 }
3310
3311 return stats;
3312}
3313
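/* net_device_ops hooked up to the stack in ixgbevf_assign_netdev_ops() */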
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003314static const struct net_device_ops ixgbevf_netdev_ops = {
Stephen Hemmingerc12db762011-06-09 02:58:39 +00003315 .ndo_open = ixgbevf_open,
3316 .ndo_stop = ixgbevf_close,
3317 .ndo_start_xmit = ixgbevf_xmit_frame,
3318 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
Eric Dumazet4197aa72011-06-22 05:01:35 +00003319 .ndo_get_stats64 = ixgbevf_get_stats,
Greg Rose92915f72010-01-09 02:24:10 +00003320 .ndo_validate_addr = eth_validate_addr,
Stephen Hemmingerc12db762011-06-09 02:58:39 +00003321 .ndo_set_mac_address = ixgbevf_set_mac,
3322 .ndo_change_mtu = ixgbevf_change_mtu,
3323 .ndo_tx_timeout = ixgbevf_tx_timeout,
Stephen Hemmingerc12db762011-06-09 02:58:39 +00003324 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3325 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
Greg Rose92915f72010-01-09 02:24:10 +00003326};
Greg Rose92915f72010-01-09 02:24:10 +00003327
3328static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3329{
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003330 dev->netdev_ops = &ixgbevf_netdev_ops;
Greg Rose92915f72010-01-09 02:24:10 +00003331 ixgbevf_set_ethtool_ops(dev);
3332 dev->watchdog_timeo = 5 * HZ;
3333}
3334
3335/**
3336 * ixgbevf_probe - Device Initialization Routine
3337 * @pdev: PCI device information struct
3338 * @ent: entry in ixgbevf_pci_tbl
3339 *
3340 * Returns 0 on success, negative on failure
3341 *
3342 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3343 * The OS initialization, configuring of the adapter private structure,
3344 * and a hardware reset occur.
3345 **/
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003346static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Greg Rose92915f72010-01-09 02:24:10 +00003347{
3348 struct net_device *netdev;
3349 struct ixgbevf_adapter *adapter = NULL;
3350 struct ixgbe_hw *hw = NULL;
3351 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3352 static int cards_found;
3353 int err, pci_using_dac;
3354
3355 err = pci_enable_device(pdev);
3356 if (err)
3357 return err;
3358
Nick Nunley2a1f8792010-04-27 13:10:50 +00003359 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3360 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
Greg Rose92915f72010-01-09 02:24:10 +00003361 pci_using_dac = 1;
3362 } else {
Nick Nunley2a1f8792010-04-27 13:10:50 +00003363 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Greg Rose92915f72010-01-09 02:24:10 +00003364 if (err) {
Nick Nunley2a1f8792010-04-27 13:10:50 +00003365 err = dma_set_coherent_mask(&pdev->dev,
3366 DMA_BIT_MASK(32));
Greg Rose92915f72010-01-09 02:24:10 +00003367 if (err) {
3368				dev_err(&pdev->dev,
3369					"No usable DMA configuration, aborting\n");
3370 goto err_dma;
3371 }
3372 }
3373 pci_using_dac = 0;
3374 }
3375
3376 err = pci_request_regions(pdev, ixgbevf_driver_name);
3377 if (err) {
3378 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3379 goto err_pci_reg;
3380 }
3381
3382 pci_set_master(pdev);
3383
Greg Rose92915f72010-01-09 02:24:10 +00003384 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3385 MAX_TX_QUEUES);
Greg Rose92915f72010-01-09 02:24:10 +00003386 if (!netdev) {
3387 err = -ENOMEM;
3388 goto err_alloc_etherdev;
3389 }
3390
3391 SET_NETDEV_DEV(netdev, &pdev->dev);
3392
3393 pci_set_drvdata(pdev, netdev);
3394 adapter = netdev_priv(netdev);
3395
3396 adapter->netdev = netdev;
3397 adapter->pdev = pdev;
3398 hw = &adapter->hw;
3399 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00003400 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Greg Rose92915f72010-01-09 02:24:10 +00003401
3402 /*
3403 * call save state here in standalone driver because it relies on
3404 * adapter struct to exist, and needs to call netdev_priv
3405 */
3406 pci_save_state(pdev);
3407
3408 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3409 pci_resource_len(pdev, 0));
3410 if (!hw->hw_addr) {
3411 err = -EIO;
3412 goto err_ioremap;
3413 }
3414
3415 ixgbevf_assign_netdev_ops(netdev);
3416
3417 adapter->bd_number = cards_found;
3418
3419 /* Setup hw api */
3420 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3421 hw->mac.type = ii->mac;
3422
3423 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
Greg Rosef416dfc2011-06-08 07:32:38 +00003424 sizeof(struct ixgbe_mbx_operations));
Greg Rose92915f72010-01-09 02:24:10 +00003425
Greg Rose92915f72010-01-09 02:24:10 +00003426 /* setup the private structure */
3427 err = ixgbevf_sw_init(adapter);
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00003428 if (err)
3429 goto err_sw_init;
3430
3431 /* The HW MAC address was set and/or determined in sw_init */
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00003432 if (!is_valid_ether_addr(netdev->dev_addr)) {
3433 pr_err("invalid MAC address\n");
3434 err = -EIO;
3435 goto err_sw_init;
3436 }
Greg Rose92915f72010-01-09 02:24:10 +00003437
Michał Mirosław471a76d2011-06-08 08:53:03 +00003438 netdev->hw_features = NETIF_F_SG |
Greg Rose92915f72010-01-09 02:24:10 +00003439 NETIF_F_IP_CSUM |
Michał Mirosław471a76d2011-06-08 08:53:03 +00003440 NETIF_F_IPV6_CSUM |
3441 NETIF_F_TSO |
3442 NETIF_F_TSO6 |
3443 NETIF_F_RXCSUM;
3444
3445 netdev->features = netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00003446 NETIF_F_HW_VLAN_CTAG_TX |
3447 NETIF_F_HW_VLAN_CTAG_RX |
3448 NETIF_F_HW_VLAN_CTAG_FILTER;
Greg Rose92915f72010-01-09 02:24:10 +00003449
Greg Rose92915f72010-01-09 02:24:10 +00003450 netdev->vlan_features |= NETIF_F_TSO;
3451 netdev->vlan_features |= NETIF_F_TSO6;
3452 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyck3bfacf92010-08-02 14:59:04 +00003453 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Greg Rose92915f72010-01-09 02:24:10 +00003454 netdev->vlan_features |= NETIF_F_SG;
3455
3456 if (pci_using_dac)
3457 netdev->features |= NETIF_F_HIGHDMA;
3458
Jiri Pirko01789342011-08-16 06:29:00 +00003459 netdev->priv_flags |= IFF_UNICAST_FLT;
3460
Greg Rose92915f72010-01-09 02:24:10 +00003461 init_timer(&adapter->watchdog_timer);
Joe Perchesc061b182010-08-23 18:20:03 +00003462 adapter->watchdog_timer.function = ixgbevf_watchdog;
Greg Rose92915f72010-01-09 02:24:10 +00003463 adapter->watchdog_timer.data = (unsigned long)adapter;
3464
3465 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3466 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3467
3468 err = ixgbevf_init_interrupt_scheme(adapter);
3469 if (err)
3470 goto err_sw_init;
3471
Greg Rose92915f72010-01-09 02:24:10 +00003472 strcpy(netdev->name, "eth%d");
3473
3474 err = register_netdev(netdev);
3475 if (err)
3476 goto err_register;
3477
Greg Rose5d426ad2010-11-16 19:27:19 -08003478 netif_carrier_off(netdev);
3479
Greg Rose33bd9f62010-03-19 02:59:52 +00003480 ixgbevf_init_last_counter_stats(adapter);
3481
Greg Rose92915f72010-01-09 02:24:10 +00003482 /* print the MAC address */
Danny Kukawkaf794e7e2012-02-24 03:45:56 +00003483 hw_dbg(hw, "%pM\n", netdev->dev_addr);
Greg Rose92915f72010-01-09 02:24:10 +00003484
3485 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3486
Greg Rose92915f72010-01-09 02:24:10 +00003487 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3488 cards_found++;
3489 return 0;
3490
3491err_register:
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003492 ixgbevf_clear_interrupt_scheme(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00003493err_sw_init:
3494 ixgbevf_reset_interrupt_capability(adapter);
3495 iounmap(hw->hw_addr);
3496err_ioremap:
3497 free_netdev(netdev);
3498err_alloc_etherdev:
3499 pci_release_regions(pdev);
3500err_pci_reg:
3501err_dma:
3502 pci_disable_device(pdev);
3503 return err;
3504}
3505
3506/**
3507 * ixgbevf_remove - Device Removal Routine
3508 * @pdev: PCI device information struct
3509 *
3510 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3511 * that it should release a PCI device. This could be caused by a
3512 * Hot-Plug event, or because the driver is going to be removed from
3513 * memory.
3514 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05003515static void ixgbevf_remove(struct pci_dev *pdev)
Greg Rose92915f72010-01-09 02:24:10 +00003516{
3517 struct net_device *netdev = pci_get_drvdata(pdev);
3518 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3519
3520 set_bit(__IXGBEVF_DOWN, &adapter->state);
3521
3522 del_timer_sync(&adapter->watchdog_timer);
3523
Tejun Heo23f333a2010-12-12 16:45:14 +01003524 cancel_work_sync(&adapter->reset_task);
Greg Rose92915f72010-01-09 02:24:10 +00003525 cancel_work_sync(&adapter->watchdog_task);
3526
Alexander Duyckfd13a9a2012-05-11 08:32:24 +00003527 if (netdev->reg_state == NETREG_REGISTERED)
Greg Rose92915f72010-01-09 02:24:10 +00003528 unregister_netdev(netdev);
Greg Rose92915f72010-01-09 02:24:10 +00003529
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003530 ixgbevf_clear_interrupt_scheme(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00003531 ixgbevf_reset_interrupt_capability(adapter);
3532
3533 iounmap(adapter->hw.hw_addr);
3534 pci_release_regions(pdev);
3535
3536 hw_dbg(&adapter->hw, "Remove complete\n");
3537
3538 kfree(adapter->tx_ring);
3539 kfree(adapter->rx_ring);
3540
3541 free_netdev(netdev);
3542
3543 pci_disable_device(pdev);
3544}
3545
Alexander Duyck9f19f312012-05-11 08:33:32 +00003546/**
3547 * ixgbevf_io_error_detected - called when PCI error is detected
3548 * @pdev: Pointer to PCI device
3549 * @state: The current pci connection state
3550 *
3551 * This function is called after a PCI bus error affecting
3552 * this device has been detected.
3553 */
3554static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3555 pci_channel_state_t state)
3556{
3557 struct net_device *netdev = pci_get_drvdata(pdev);
3558 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3559
3560 netif_device_detach(netdev);
3561
3562 if (state == pci_channel_io_perm_failure)
3563 return PCI_ERS_RESULT_DISCONNECT;
3564
3565 if (netif_running(netdev))
3566 ixgbevf_down(adapter);
3567
3568 pci_disable_device(pdev);
3569
3570	/* Request a slot reset. */
3571 return PCI_ERS_RESULT_NEED_RESET;
3572}
3573
3574/**
3575 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3576 * @pdev: Pointer to PCI device
3577 *
3578 * Restart the card from scratch, as if from a cold boot. Implementation
3579 * resembles the first half of the ixgbevf_resume routine.
3580 */
3581static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3582{
3583 struct net_device *netdev = pci_get_drvdata(pdev);
3584 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3585
3586 if (pci_enable_device_mem(pdev)) {
3587 dev_err(&pdev->dev,
3588 "Cannot re-enable PCI device after reset.\n");
3589 return PCI_ERS_RESULT_DISCONNECT;
3590 }
3591
3592 pci_set_master(pdev);
3593
3594 ixgbevf_reset(adapter);
3595
3596 return PCI_ERS_RESULT_RECOVERED;
3597}
3598
3599/**
3600 * ixgbevf_io_resume - called when traffic can start flowing again.
3601 * @pdev: Pointer to PCI device
3602 *
3603 * This callback is called when the error recovery driver tells us that
3604 * it's OK to resume normal operation. Implementation resembles the
3605 * second half of the ixgbevf_resume routine.
3606 */
3607static void ixgbevf_io_resume(struct pci_dev *pdev)
3608{
3609 struct net_device *netdev = pci_get_drvdata(pdev);
3610 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3611
3612 if (netif_running(netdev))
3613 ixgbevf_up(adapter);
3614
3615 netif_device_attach(netdev);
3616}
3617
3618/* PCI Error Recovery (ERS) */
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07003619static const struct pci_error_handlers ixgbevf_err_handler = {
Alexander Duyck9f19f312012-05-11 08:33:32 +00003620 .error_detected = ixgbevf_io_error_detected,
3621 .slot_reset = ixgbevf_io_slot_reset,
3622 .resume = ixgbevf_io_resume,
3623};
3624
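/* PCI driver glue: probe/remove, optional PM hooks and PCI error
 * recovery handlers
 */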
Greg Rose92915f72010-01-09 02:24:10 +00003625static struct pci_driver ixgbevf_driver = {
3626 .name = ixgbevf_driver_name,
3627 .id_table = ixgbevf_pci_tbl,
3628 .probe = ixgbevf_probe,
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05003629 .remove = ixgbevf_remove,
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003630#ifdef CONFIG_PM
3631 /* Power Management Hooks */
3632 .suspend = ixgbevf_suspend,
3633 .resume = ixgbevf_resume,
3634#endif
Greg Rose92915f72010-01-09 02:24:10 +00003635 .shutdown = ixgbevf_shutdown,
Alexander Duyck9f19f312012-05-11 08:33:32 +00003636 .err_handler = &ixgbevf_err_handler
Greg Rose92915f72010-01-09 02:24:10 +00003637};
3638
3639/**
Greg Rose65d676c2011-02-03 06:54:13 +00003640 * ixgbevf_init_module - Driver Registration Routine
Greg Rose92915f72010-01-09 02:24:10 +00003641 *
Greg Rose65d676c2011-02-03 06:54:13 +00003642 * ixgbevf_init_module is the first routine called when the driver is
Greg Rose92915f72010-01-09 02:24:10 +00003643 * loaded. All it does is register with the PCI subsystem.
3644 **/
3645static int __init ixgbevf_init_module(void)
3646{
3647 int ret;
Jeff Kirsherdbd96362011-10-21 19:38:18 +00003648 pr_info("%s - version %s\n", ixgbevf_driver_string,
3649 ixgbevf_driver_version);
Greg Rose92915f72010-01-09 02:24:10 +00003650
Jeff Kirsherdbd96362011-10-21 19:38:18 +00003651 pr_info("%s\n", ixgbevf_copyright);
Greg Rose92915f72010-01-09 02:24:10 +00003652
3653 ret = pci_register_driver(&ixgbevf_driver);
3654 return ret;
3655}
3656
3657module_init(ixgbevf_init_module);
3658
3659/**
Greg Rose65d676c2011-02-03 06:54:13 +00003660 * ixgbevf_exit_module - Driver Exit Cleanup Routine
Greg Rose92915f72010-01-09 02:24:10 +00003661 *
Greg Rose65d676c2011-02-03 06:54:13 +00003662 * ixgbevf_exit_module is called just before the driver is removed
Greg Rose92915f72010-01-09 02:24:10 +00003663 * from memory.
3664 **/
3665static void __exit ixgbevf_exit_module(void)
3666{
3667 pci_unregister_driver(&ixgbevf_driver);
3668}
3669
3670#ifdef DEBUG
3671/**
Greg Rose65d676c2011-02-03 06:54:13 +00003672 * ixgbevf_get_hw_dev_name - return device name string
Greg Rose92915f72010-01-09 02:24:10 +00003673 * used by hardware layer to print debugging information
3674 **/
3675char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3676{
3677 struct ixgbevf_adapter *adapter = hw->back;
3678 return adapter->netdev->name;
3679}
3680
3681#endif
3682module_exit(ixgbevf_exit_module);
3683
3684/* ixgbevf_main.c */