/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
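		/*
		 * Each VTIVAR register carries the entries for a pair of
		 * queues as four 8-bit fields: Rx of the even queue at bits
		 * 7:0, its Tx at 15:8, Rx of the odd queue at 23:16 and its
		 * Tx at 31:24 -- hence the byte offset computed below from
		 * (queue & 1) and direction.
		 */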
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
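/*
 * TXD_USE_COUNT(S) gives how many descriptors a buffer of S bytes consumes
 * under the IXGBE_MAX_DATA_PER_TXD (16K) per-descriptor limit; DESC_NEEDED
 * is a conservative worst case for a single skb: roughly one descriptor per
 * page fragment plus a few spare for the linear data and context descriptor.
 */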

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
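	/*
	 * Only wake the queue once at least two worst-case packets' worth of
	 * descriptors are free again, so the queue is not restarted and then
	 * immediately stopped over and over.
	 */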
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			skb->next->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

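	/*
	 * The loop above leaves v_idx at the last MSI-X vector, which is not
	 * tied to any queue; map the "other" causes (mailbox/link) to it via
	 * the misc IVAR register.
	 */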
	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
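	/*
	 * (q_vector->itr is kept in EITR register layout with the interval
	 * field starting at bit 2, so shifting right by 2 yields usecs;
	 * e.g. IXGBE_20K_ITR == 200 corresponds to a 50 usec interval.)
	 */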
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}


/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
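	/*
	 * For example, with 2 queue vectors left and 3 Rx rings remaining,
	 * vector 0 gets DIV_ROUND_UP(3, 2) = 2 rings and vector 1 gets the
	 * remaining 1.
	 */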
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

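	/*
	 * Clear the auto-mask and auto-clear registers so nothing re-arms
	 * behind our back, then mask every vector via EIMC.
	 */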
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

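	/* auto-mask and auto-clear all queue vectors, then unmask them */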
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;
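	/*
	 * With the default 1500 MTU this works out to 1522 bytes here, so an
	 * X540 VF can use the 1522 byte buffer below while an 82599 VF falls
	 * through to the 3K buffer.
	 */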
1054
1055 /*
1056 * Make best use of allocation by using all but 1K of a
1057 * power of 2 allocation that will be used for skb->head.
1058 */
1059 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1060 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1061 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1062 else if (max_frame <= IXGBEVF_RXBUFFER_3K)
1063 rx_buf_len = IXGBEVF_RXBUFFER_3K;
1064 else if (max_frame <= IXGBEVF_RXBUFFER_7K)
1065 rx_buf_len = IXGBEVF_RXBUFFER_7K;
1066 else if (max_frame <= IXGBEVF_RXBUFFER_15K)
1067 rx_buf_len = IXGBEVF_RXBUFFER_15K;
1068 else
1069 rx_buf_len = IXGBEVF_MAX_RXBUFFER;
1070
1071 for (i = 0; i < adapter->num_rx_queues; i++)
1072 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1073}
1074
Greg Rose92915f72010-01-09 02:24:10 +00001075/**
1076 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1077 * @adapter: board private structure
1078 *
1079 * Configure the Rx unit of the MAC after a reset.
1080 **/
1081static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1082{
1083 u64 rdba;
1084 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001085 int i, j;
1086 u32 rdlen;
Greg Rose92915f72010-01-09 02:24:10 +00001087
Alexander Duyck77d5dfc2012-05-11 08:32:19 +00001088 /* PSRTYPE must be initialized in 82599 */
1089 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
Alexander Duyckdd1fe112012-07-20 08:09:48 +00001090
1091 /* set_rx_buffer_len must be called before ring initialization */
1092 ixgbevf_set_rx_buffer_len(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001093
1094 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1095 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1096 * the Base and Length of the Rx Descriptor Ring */
1097 for (i = 0; i < adapter->num_rx_queues; i++) {
1098 rdba = adapter->rx_ring[i].dma;
1099 j = adapter->rx_ring[i].reg_idx;
1100 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1101 (rdba & DMA_BIT_MASK(32)));
1102 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1103 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1104 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1105 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1106 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1107 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
Greg Rose92915f72010-01-09 02:24:10 +00001108
1109 ixgbevf_configure_srrctl(adapter, j);
1110 }
1111}
1112
Jiri Pirko8e586132011-12-08 19:52:37 -05001113static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001114{
1115 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1116 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001117
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001118 spin_lock(&adapter->mbx_lock);
1119
Greg Rose92915f72010-01-09 02:24:10 +00001120 /* add VID to filter table */
1121 if (hw->mac.ops.set_vfta)
1122 hw->mac.ops.set_vfta(hw, vid, 0, true);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001123
1124 spin_unlock(&adapter->mbx_lock);
1125
Jiri Pirkodadcd652011-07-21 03:25:09 +00001126 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001127
1128 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00001129}
1130
Jiri Pirko8e586132011-12-08 19:52:37 -05001131static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001132{
1133 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1134 struct ixgbe_hw *hw = &adapter->hw;
1135
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001136 spin_lock(&adapter->mbx_lock);
1137
Greg Rose92915f72010-01-09 02:24:10 +00001138 /* remove VID from filter table */
1139 if (hw->mac.ops.set_vfta)
1140 hw->mac.ops.set_vfta(hw, vid, 0, false);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001141
1142 spin_unlock(&adapter->mbx_lock);
1143
Jiri Pirkodadcd652011-07-21 03:25:09 +00001144 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001145
1146 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00001147}
1148
1149static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1150{
Jiri Pirkodadcd652011-07-21 03:25:09 +00001151 u16 vid;
Greg Rose92915f72010-01-09 02:24:10 +00001152
Jiri Pirkodadcd652011-07-21 03:25:09 +00001153 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1154 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
Greg Rose92915f72010-01-09 02:24:10 +00001155}
1156
Greg Rose46ec20f2011-05-13 01:33:42 +00001157static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1158{
1159 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1160 struct ixgbe_hw *hw = &adapter->hw;
1161 int count = 0;
1162
1163 if ((netdev_uc_count(netdev)) > 10) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001164 pr_err("Too many unicast filters - No Space\n");
Greg Rose46ec20f2011-05-13 01:33:42 +00001165 return -ENOSPC;
1166 }
1167
1168 if (!netdev_uc_empty(netdev)) {
1169 struct netdev_hw_addr *ha;
1170 netdev_for_each_uc_addr(ha, netdev) {
1171 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1172 udelay(200);
1173 }
1174 } else {
1175 /*
1176 * If the list is empty then send message to PF driver to
1177 * clear all macvlans on this VF.
1178 */
1179 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1180 }
1181
1182 return count;
1183}
1184
Greg Rose92915f72010-01-09 02:24:10 +00001185/**
1186 * ixgbevf_set_rx_mode - Multicast set
1187 * @netdev: network interface device structure
1188 *
1189 * The set_rx_method entry point is called whenever the multicast address
1190 * list or the network interface flags are updated. This routine is
1191 * responsible for configuring the hardware for proper multicast mode.
1192 **/
1193static void ixgbevf_set_rx_mode(struct net_device *netdev)
1194{
1195 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1196 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001197
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001198 spin_lock(&adapter->mbx_lock);
1199
Greg Rose92915f72010-01-09 02:24:10 +00001200 /* reprogram multicast list */
Greg Rose92915f72010-01-09 02:24:10 +00001201 if (hw->mac.ops.update_mc_addr_list)
Jiri Pirko5c58c472010-03-23 22:58:20 +00001202 hw->mac.ops.update_mc_addr_list(hw, netdev);
Greg Rose46ec20f2011-05-13 01:33:42 +00001203
1204 ixgbevf_write_uc_addr_list(netdev);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001205
1206 spin_unlock(&adapter->mbx_lock);
Greg Rose92915f72010-01-09 02:24:10 +00001207}
1208
1209static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1210{
1211 int q_idx;
1212 struct ixgbevf_q_vector *q_vector;
1213 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1214
1215 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Greg Rose92915f72010-01-09 02:24:10 +00001216 q_vector = adapter->q_vector[q_idx];
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001217 napi_enable(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00001218 }
1219}
1220
1221static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1222{
1223 int q_idx;
1224 struct ixgbevf_q_vector *q_vector;
1225 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1226
1227 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1228 q_vector = adapter->q_vector[q_idx];
Greg Rose92915f72010-01-09 02:24:10 +00001229 napi_disable(&q_vector->napi);
1230 }
1231}
1232
1233static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1234{
1235 struct net_device *netdev = adapter->netdev;
1236 int i;
1237
1238 ixgbevf_set_rx_mode(netdev);
1239
1240 ixgbevf_restore_vlan(adapter);
1241
1242 ixgbevf_configure_tx(adapter);
1243 ixgbevf_configure_rx(adapter);
1244 for (i = 0; i < adapter->num_rx_queues; i++) {
1245 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
Alexander Duyck18c63082012-05-11 08:33:11 +00001246 ixgbevf_alloc_rx_buffers(adapter, ring,
1247 IXGBE_DESC_UNUSED(ring));
Greg Rose92915f72010-01-09 02:24:10 +00001248 }
1249}
1250
1251#define IXGBE_MAX_RX_DESC_POLL 10
1252static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1253 int rxr)
1254{
1255 struct ixgbe_hw *hw = &adapter->hw;
1256 int j = adapter->rx_ring[rxr].reg_idx;
1257 int k;
1258
1259 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1260 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1261 break;
1262 else
1263 msleep(1);
1264 }
1265 if (k >= IXGBE_MAX_RX_DESC_POLL) {
1266 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1267 "not set within the polling period\n", rxr);
1268 }
1269
1270 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1271 (adapter->rx_ring[rxr].count - 1));
1272}
1273
Greg Rose33bd9f62010-03-19 02:59:52 +00001274static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1275{
1276 /* Only save pre-reset stats if there are some */
1277 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1278 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1279 adapter->stats.base_vfgprc;
1280 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1281 adapter->stats.base_vfgptc;
1282 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1283 adapter->stats.base_vfgorc;
1284 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1285 adapter->stats.base_vfgotc;
1286 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1287 adapter->stats.base_vfmprc;
1288 }
1289}
1290
1291static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1292{
1293 struct ixgbe_hw *hw = &adapter->hw;
1294
1295 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1296 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1297 adapter->stats.last_vfgorc |=
1298 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1299 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1300 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1301 adapter->stats.last_vfgotc |=
1302 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1303 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1304
1305 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1306 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1307 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1308 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1309 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1310}
1311
Alexander Duyck31186782012-07-20 08:09:58 +00001312static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1313{
1314 struct ixgbe_hw *hw = &adapter->hw;
1315 int api[] = { ixgbe_mbox_api_10,
1316 ixgbe_mbox_api_unknown };
1317 int err = 0, idx = 0;
1318
1319 spin_lock(&adapter->mbx_lock);
1320
1321 while (api[idx] != ixgbe_mbox_api_unknown) {
1322 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1323 if (!err)
1324 break;
1325 idx++;
1326 }
1327
1328 spin_unlock(&adapter->mbx_lock);
1329}
1330
Greg Rose795180d2012-04-17 04:29:34 +00001331static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001332{
1333 struct net_device *netdev = adapter->netdev;
1334 struct ixgbe_hw *hw = &adapter->hw;
1335 int i, j = 0;
1336 int num_rx_rings = adapter->num_rx_queues;
1337 u32 txdctl, rxdctl;
1338
1339 for (i = 0; i < adapter->num_tx_queues; i++) {
1340 j = adapter->tx_ring[i].reg_idx;
1341 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1342 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1343 txdctl |= (8 << 16);
1344 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1345 }
1346
1347 for (i = 0; i < adapter->num_tx_queues; i++) {
1348 j = adapter->tx_ring[i].reg_idx;
1349 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1350 txdctl |= IXGBE_TXDCTL_ENABLE;
1351 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1352 }
1353
1354 for (i = 0; i < num_rx_rings; i++) {
1355 j = adapter->rx_ring[i].reg_idx;
1356 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
Jiri Pirkodadcd652011-07-21 03:25:09 +00001357 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
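		/*
		 * X540 VFs can program the per-queue max packet length
		 * (RLPML) directly in RXDCTL, so set it from the current
		 * MTU here.
		 */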
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	spin_unlock(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
1450 memset(rx_ring->desc, 0, rx_ring->size);
1451
1452 rx_ring->next_to_clean = 0;
1453 rx_ring->next_to_use = 0;
1454
1455 if (rx_ring->head)
1456 writel(0, adapter->hw.hw_addr + rx_ring->head);
1457 if (rx_ring->tail)
1458 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1459}
1460
1461/**
1462 * ixgbevf_clean_tx_ring - Free Tx Buffers
1463 * @adapter: board private structure
1464 * @tx_ring: ring to be cleaned
1465 **/
1466static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1467 struct ixgbevf_ring *tx_ring)
1468{
1469 struct ixgbevf_tx_buffer *tx_buffer_info;
1470 unsigned long size;
1471 unsigned int i;
1472
Greg Rosec0456c22010-01-22 22:47:18 +00001473 if (!tx_ring->tx_buffer_info)
1474 return;
1475
Greg Rose92915f72010-01-09 02:24:10 +00001476 /* Free all the Tx ring sk_buffs */
1477
1478 for (i = 0; i < tx_ring->count; i++) {
1479 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00001480 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00001481 }
1482
1483 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1484 memset(tx_ring->tx_buffer_info, 0, size);
1485
1486 memset(tx_ring->desc, 0, tx_ring->size);
1487
1488 tx_ring->next_to_use = 0;
1489 tx_ring->next_to_clean = 0;
1490
1491 if (tx_ring->head)
1492 writel(0, adapter->hw.hw_addr + tx_ring->head);
1493 if (tx_ring->tail)
1494 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1495}
1496
1497/**
1498 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1499 * @adapter: board private structure
1500 **/
1501static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1502{
1503 int i;
1504
1505 for (i = 0; i < adapter->num_rx_queues; i++)
1506 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1507}
1508
1509/**
1510 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1511 * @adapter: board private structure
1512 **/
1513static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1514{
1515 int i;
1516
1517 for (i = 0; i < adapter->num_tx_queues; i++)
1518 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1519}
1520
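/**
 * ixgbevf_down - shut the interface down
 * @adapter: board private structure
 *
 * Stops the transmit queues, masks interrupts, quiesces NAPI and the
 * watchdog, disables the Tx queues in hardware, then resets the VF and
 * cleans all rings so the interface can later be brought up cleanly.
 **/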
1521void ixgbevf_down(struct ixgbevf_adapter *adapter)
1522{
1523 struct net_device *netdev = adapter->netdev;
1524 struct ixgbe_hw *hw = &adapter->hw;
1525 u32 txdctl;
1526 int i, j;
1527
1528 /* signal that we are down to the interrupt handler */
1529 set_bit(__IXGBEVF_DOWN, &adapter->state);
1530 /* disable receives */
1531
1532 netif_tx_disable(netdev);
1533
1534 msleep(10);
1535
1536 netif_tx_stop_all_queues(netdev);
1537
1538 ixgbevf_irq_disable(adapter);
1539
1540 ixgbevf_napi_disable_all(adapter);
1541
1542 del_timer_sync(&adapter->watchdog_timer);
1543 /* can't call flush scheduled work here because it can deadlock
1544 * if linkwatch_event tries to acquire the rtnl_lock which we are
1545 * holding */
1546 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1547 msleep(1);
1548
1549 /* disable transmits in the hardware now that interrupts are off */
1550 for (i = 0; i < adapter->num_tx_queues; i++) {
1551 j = adapter->tx_ring[i].reg_idx;
1552 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1553 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1554 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1555 }
1556
1557 netif_carrier_off(netdev);
1558
1559 if (!pci_channel_offline(adapter->pdev))
1560 ixgbevf_reset(adapter);
1561
1562 ixgbevf_clean_all_tx_rings(adapter);
1563 ixgbevf_clean_all_rx_rings(adapter);
1564}
1565
1566void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1567{
1568 WARN_ON(in_interrupt());
Greg Rosec0456c22010-01-22 22:47:18 +00001569
Greg Rose92915f72010-01-09 02:24:10 +00001570 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1571 msleep(1);
1572
Greg Rosec0456c22010-01-22 22:47:18 +00001573 /*
1574 * Check if PF is up before re-init. If not then skip until
1575 * later when the PF is up and ready to service requests from
1576 * the VF via mailbox. If the VF is up and running then the
1577 * watchdog task will continue to schedule reset tasks until
1578 * the PF is up and running.
1579 */
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001580 ixgbevf_down(adapter);
1581 ixgbevf_up(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001582
1583 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1584}
1585
1586void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1587{
1588 struct ixgbe_hw *hw = &adapter->hw;
1589 struct net_device *netdev = adapter->netdev;
1590
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001591 spin_lock(&adapter->mbx_lock);
1592
Greg Rose92915f72010-01-09 02:24:10 +00001593 if (hw->mac.ops.reset_hw(hw))
1594 hw_dbg(hw, "PF still resetting\n");
1595 else
1596 hw->mac.ops.init_hw(hw);
1597
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001598 spin_unlock(&adapter->mbx_lock);
1599
Greg Rose92915f72010-01-09 02:24:10 +00001600 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1601 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1602 netdev->addr_len);
1603 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1604 netdev->addr_len);
1605 }
1606}
1607
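/**
 * ixgbevf_acquire_msix_vectors - try to reserve MSI-X vectors
 * @adapter: board private structure
 * @vectors: number of vectors requested
 *
 * Calls pci_enable_msix() repeatedly, trimming the request to whatever
 * the kernel reports as available, until it either succeeds or drops
 * below the two-vector minimum (one Tx/Rx queue vector plus one for
 * mailbox/link events), in which case the MSI-X entries are released.
 **/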
1608static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1609 int vectors)
1610{
1611 int err, vector_threshold;
1612
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001613 /* We'll want at least 2 (vector_threshold):
1614 * 1) TxQ[0] + RxQ[0] handler
1615 * 2) Other (Link Status Change, etc.)
Greg Rose92915f72010-01-09 02:24:10 +00001616 */
1617 vector_threshold = MIN_MSIX_COUNT;
1618
1619 /* The more we get, the more we will assign to Tx/Rx Cleanup
1620 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1621 * Right now, we simply care about how many we'll get; we'll
1622 * set them up later while requesting irq's.
1623	 * set them up later while requesting IRQs.
1624 while (vectors >= vector_threshold) {
1625 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1626 vectors);
1627 if (!err) /* Success in acquiring all requested vectors. */
1628 break;
1629 else if (err < 0)
1630 vectors = 0; /* Nasty failure, quit now */
1631 else /* err == number of vectors we should try again with */
1632 vectors = err;
1633 }
1634
1635 if (vectors < vector_threshold) {
1636 /* Can't allocate enough MSI-X interrupts? Oh well.
1637 * This just means we'll go with either a single MSI
1638 * vector or fall back to legacy interrupts.
1639 */
1640 hw_dbg(&adapter->hw,
1641 "Unable to allocate MSI-X interrupts\n");
1642 kfree(adapter->msix_entries);
1643 adapter->msix_entries = NULL;
1644 } else {
1645 /*
1646 * Adjust for only the vectors we'll use, which is minimum
1647 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1648 * vectors we were allocated.
1649 */
1650 adapter->num_msix_vectors = vectors;
1651 }
1652}
1653
Ben Hutchings49ce9c22012-07-10 10:56:00 +00001654/**
1655 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
Greg Rose92915f72010-01-09 02:24:10 +00001656 * @adapter: board private structure to initialize
1657 *
1658 * This is the top level queue allocation routine. The order here is very
1659 * important, starting with the "most" number of features turned on at once,
1660 * and ending with the smallest set of features. This way large combinations
1661 * can be allocated if they're turned on, and smaller combinations are the
1662 * fallthrough conditions.
1663 *
1664 **/
1665static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1666{
1667 /* Start with base case */
1668 adapter->num_rx_queues = 1;
1669 adapter->num_tx_queues = 1;
Greg Rose92915f72010-01-09 02:24:10 +00001670}
1671
1672/**
1673 * ixgbevf_alloc_queues - Allocate memory for all rings
1674 * @adapter: board private structure to initialize
1675 *
1676 * We allocate one ring per queue at run-time since we don't know the
1677 * number of queues at compile-time. The polling_netdev array is
1678 * intended for Multiqueue, but should work fine with a single queue.
1679 **/
1680static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1681{
1682 int i;
1683
1684 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1685 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1686 if (!adapter->tx_ring)
1687 goto err_tx_ring_allocation;
1688
1689 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1690 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1691 if (!adapter->rx_ring)
1692 goto err_rx_ring_allocation;
1693
1694 for (i = 0; i < adapter->num_tx_queues; i++) {
1695 adapter->tx_ring[i].count = adapter->tx_ring_count;
1696 adapter->tx_ring[i].queue_index = i;
1697 adapter->tx_ring[i].reg_idx = i;
Alexander Duyckfb401952012-05-11 08:33:16 +00001698 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1699 adapter->tx_ring[i].netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00001700 }
1701
1702 for (i = 0; i < adapter->num_rx_queues; i++) {
1703 adapter->rx_ring[i].count = adapter->rx_ring_count;
1704 adapter->rx_ring[i].queue_index = i;
1705 adapter->rx_ring[i].reg_idx = i;
Alexander Duyckfb401952012-05-11 08:33:16 +00001706 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1707 adapter->rx_ring[i].netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00001708 }
1709
1710 return 0;
1711
1712err_rx_ring_allocation:
1713 kfree(adapter->tx_ring);
1714err_tx_ring_allocation:
1715 return -ENOMEM;
1716}
1717
1718/**
1719 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1720 * @adapter: board private structure to initialize
1721 *
1722 * Attempt to configure the interrupts using the best available
1723 * capabilities of the hardware and the kernel.
1724 **/
1725static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1726{
1727 int err = 0;
1728 int vector, v_budget;
1729
1730 /*
1731 * It's easy to be greedy for MSI-X vectors, but it really
1732 * doesn't do us much good if we have a lot more vectors
1733	 * than CPUs. So let's be conservative and only ask for
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001734	 * (roughly) the same number of vectors as there are CPUs.
1735 * The default is to use pairs of vectors.
Greg Rose92915f72010-01-09 02:24:10 +00001736 */
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001737 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1738 v_budget = min_t(int, v_budget, num_online_cpus());
1739 v_budget += NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00001740
1741 /* A failure in MSI-X entry allocation isn't fatal, but it does
1742 * mean we disable MSI-X capabilities of the adapter. */
1743 adapter->msix_entries = kcalloc(v_budget,
1744 sizeof(struct msix_entry), GFP_KERNEL);
1745 if (!adapter->msix_entries) {
1746 err = -ENOMEM;
1747 goto out;
1748 }
1749
1750 for (vector = 0; vector < v_budget; vector++)
1751 adapter->msix_entries[vector].entry = vector;
1752
1753 ixgbevf_acquire_msix_vectors(adapter, v_budget);
1754
1755out:
1756 return err;
1757}
1758
1759/**
1760 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1761 * @adapter: board private structure to initialize
1762 *
1763 * We allocate one q_vector per queue interrupt. If allocation fails we
1764 * return -ENOMEM.
1765 **/
1766static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1767{
1768 int q_idx, num_q_vectors;
1769 struct ixgbevf_q_vector *q_vector;
Greg Rose92915f72010-01-09 02:24:10 +00001770
1771 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00001772
1773 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1774 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1775 if (!q_vector)
1776 goto err_out;
1777 q_vector->adapter = adapter;
1778 q_vector->v_idx = q_idx;
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001779 netif_napi_add(adapter->netdev, &q_vector->napi,
1780 ixgbevf_poll, 64);
Greg Rose92915f72010-01-09 02:24:10 +00001781 adapter->q_vector[q_idx] = q_vector;
1782 }
1783
1784 return 0;
1785
1786err_out:
1787 while (q_idx) {
1788 q_idx--;
1789 q_vector = adapter->q_vector[q_idx];
1790 netif_napi_del(&q_vector->napi);
1791 kfree(q_vector);
1792 adapter->q_vector[q_idx] = NULL;
1793 }
1794 return -ENOMEM;
1795}
1796
1797/**
1798 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1799 * @adapter: board private structure to initialize
1800 *
1801 * This function frees the memory allocated to the q_vectors. In addition if
1802 * NAPI is enabled it will delete any references to the NAPI struct prior
1803 * to freeing the q_vector.
1804 **/
1805static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1806{
1807 int q_idx, num_q_vectors;
1808 int napi_vectors;
1809
1810 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1811 napi_vectors = adapter->num_rx_queues;
1812
1813 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1814 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1815
1816 adapter->q_vector[q_idx] = NULL;
1817 if (q_idx < napi_vectors)
1818 netif_napi_del(&q_vector->napi);
1819 kfree(q_vector);
1820 }
1821}
1822
1823/**
1824 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1825 * @adapter: board private structure
1826 *
1827 **/
1828static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1829{
1830 pci_disable_msix(adapter->pdev);
1831 kfree(adapter->msix_entries);
1832 adapter->msix_entries = NULL;
Greg Rose92915f72010-01-09 02:24:10 +00001833}
1834
1835/**
1836 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1837 * @adapter: board private structure to initialize
1838 *
1839 **/
1840static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1841{
1842 int err;
1843
1844 /* Number of supported queues */
1845 ixgbevf_set_num_queues(adapter);
1846
1847 err = ixgbevf_set_interrupt_capability(adapter);
1848 if (err) {
1849 hw_dbg(&adapter->hw,
1850 "Unable to setup interrupt capabilities\n");
1851 goto err_set_interrupt;
1852 }
1853
1854 err = ixgbevf_alloc_q_vectors(adapter);
1855 if (err) {
1856 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1857 "vectors\n");
1858 goto err_alloc_q_vectors;
1859 }
1860
1861 err = ixgbevf_alloc_queues(adapter);
1862 if (err) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001863 pr_err("Unable to allocate memory for queues\n");
Greg Rose92915f72010-01-09 02:24:10 +00001864 goto err_alloc_queues;
1865 }
1866
1867 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
1868 "Tx Queue count = %u\n",
1869 (adapter->num_rx_queues > 1) ? "Enabled" :
1870 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
1871
1872 set_bit(__IXGBEVF_DOWN, &adapter->state);
1873
1874 return 0;
1875err_alloc_queues:
1876 ixgbevf_free_q_vectors(adapter);
1877err_alloc_q_vectors:
1878 ixgbevf_reset_interrupt_capability(adapter);
1879err_set_interrupt:
1880 return err;
1881}
1882
1883/**
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00001884 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
1885 * @adapter: board private structure to clear interrupt scheme on
1886 *
1887 * We go through and clear interrupt specific resources and reset the structure
1888 * to pre-load conditions
1889 **/
1890static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1891{
1892 adapter->num_tx_queues = 0;
1893 adapter->num_rx_queues = 0;
1894
1895 ixgbevf_free_q_vectors(adapter);
1896 ixgbevf_reset_interrupt_capability(adapter);
1897}
1898
1899/**
Greg Rose92915f72010-01-09 02:24:10 +00001900 * ixgbevf_sw_init - Initialize general software structures
1901 * (struct ixgbevf_adapter)
1902 * @adapter: board private structure to initialize
1903 *
1904 * ixgbevf_sw_init initializes the Adapter private data structure.
1905 * Fields are initialized based on PCI device information and
1906 * OS network device settings (MTU size).
1907 **/
1908static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1909{
1910 struct ixgbe_hw *hw = &adapter->hw;
1911 struct pci_dev *pdev = adapter->pdev;
1912 int err;
1913
1914 /* PCI config space info */
1915
1916 hw->vendor_id = pdev->vendor;
1917 hw->device_id = pdev->device;
Sergei Shtylyovff938e42011-02-28 11:57:33 -08001918 hw->revision_id = pdev->revision;
Greg Rose92915f72010-01-09 02:24:10 +00001919 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1920 hw->subsystem_device_id = pdev->subsystem_device;
1921
1922 hw->mbx.ops.init_params(hw);
1923 hw->mac.max_tx_queues = MAX_TX_QUEUES;
1924 hw->mac.max_rx_queues = MAX_RX_QUEUES;
1925 err = hw->mac.ops.reset_hw(hw);
1926 if (err) {
1927 dev_info(&pdev->dev,
1928 "PF still in reset state, assigning new address\n");
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00001929 eth_hw_addr_random(adapter->netdev);
1930 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
1931 adapter->netdev->addr_len);
Greg Rose92915f72010-01-09 02:24:10 +00001932 } else {
1933 err = hw->mac.ops.init_hw(hw);
1934 if (err) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001935 pr_err("init_shared_code failed: %d\n", err);
Greg Rose92915f72010-01-09 02:24:10 +00001936 goto out;
1937 }
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00001938 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
1939 adapter->netdev->addr_len);
Greg Rose92915f72010-01-09 02:24:10 +00001940 }
1941
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001942 /* lock to protect mailbox accesses */
1943 spin_lock_init(&adapter->mbx_lock);
1944
Greg Rose92915f72010-01-09 02:24:10 +00001945 /* Enable dynamic interrupt throttling rates */
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001946 adapter->rx_itr_setting = 1;
1947 adapter->tx_itr_setting = 1;
Greg Rose92915f72010-01-09 02:24:10 +00001948
Greg Rose92915f72010-01-09 02:24:10 +00001949 /* set default ring sizes */
1950 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
1951 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
1952
Greg Rose92915f72010-01-09 02:24:10 +00001953 set_bit(__IXGBEVF_DOWN, &adapter->state);
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00001954 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00001955
1956out:
1957 return err;
1958}
1959
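/*
 * The VF statistics registers are treated as free-running counters that
 * wrap silently; the macros below extend them into 64-bit software
 * counters.  When the current register value is smaller than the last
 * value seen, a wrap is assumed and 2^32 (or 2^36 for the split LSB/MSB
 * octet counters) is added before the low-order bits of the software
 * counter are refreshed from the register.  For example, a jump from
 * last_counter 0xFFFFFFF0 to a reading of 0x00000010 adds 0x100000000
 * to the 32-bit variant's running total.
 */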
Greg Rose92915f72010-01-09 02:24:10 +00001960#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
1961 { \
1962 u32 current_counter = IXGBE_READ_REG(hw, reg); \
1963 if (current_counter < last_counter) \
1964 counter += 0x100000000LL; \
1965 last_counter = current_counter; \
1966 counter &= 0xFFFFFFFF00000000LL; \
1967 counter |= current_counter; \
1968 }
1969
1970#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1971 { \
1972 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
1973 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
1974 u64 current_counter = (current_counter_msb << 32) | \
1975 current_counter_lsb; \
1976 if (current_counter < last_counter) \
1977 counter += 0x1000000000LL; \
1978 last_counter = current_counter; \
1979 counter &= 0xFFFFFFF000000000LL; \
1980 counter |= current_counter; \
1981 }
1982/**
1983 * ixgbevf_update_stats - Update the board statistics counters.
1984 * @adapter: board private structure
1985 **/
1986void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
1987{
1988 struct ixgbe_hw *hw = &adapter->hw;
1989
1990 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
1991 adapter->stats.vfgprc);
1992 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
1993 adapter->stats.vfgptc);
1994 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1995 adapter->stats.last_vfgorc,
1996 adapter->stats.vfgorc);
1997 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1998 adapter->stats.last_vfgotc,
1999 adapter->stats.vfgotc);
2000 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2001 adapter->stats.vfmprc);
Greg Rose92915f72010-01-09 02:24:10 +00002002}
2003
2004/**
2005 * ixgbevf_watchdog - Timer Call-back
2006 * @data: pointer to adapter cast into an unsigned long
2007 **/
2008static void ixgbevf_watchdog(unsigned long data)
2009{
2010 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2011 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002012 u32 eics = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002013 int i;
2014
2015 /*
2016 * Do the watchdog outside of interrupt context due to the lovely
2017 * delays that some of the newer hardware requires
2018 */
2019
2020 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2021 goto watchdog_short_circuit;
2022
2023 /* get one bit for every active tx/rx interrupt vector */
2024 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2025 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
Alexander Duyck6b43c442012-05-11 08:32:45 +00002026 if (qv->rx.ring || qv->tx.ring)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002027 eics |= 1 << i;
Greg Rose92915f72010-01-09 02:24:10 +00002028 }
2029
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002030 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
Greg Rose92915f72010-01-09 02:24:10 +00002031
2032watchdog_short_circuit:
2033 schedule_work(&adapter->watchdog_task);
2034}
2035
2036/**
2037 * ixgbevf_tx_timeout - Respond to a Tx Hang
2038 * @netdev: network interface device structure
2039 **/
2040static void ixgbevf_tx_timeout(struct net_device *netdev)
2041{
2042 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2043
2044 /* Do the reset outside of interrupt context */
2045 schedule_work(&adapter->reset_task);
2046}
2047
2048static void ixgbevf_reset_task(struct work_struct *work)
2049{
2050 struct ixgbevf_adapter *adapter;
2051 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2052
2053 /* If we're already down or resetting, just bail */
2054 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2055 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2056 return;
2057
2058 adapter->tx_timeout_count++;
2059
2060 ixgbevf_reinit_locked(adapter);
2061}
2062
2063/**
2064 * ixgbevf_watchdog_task - worker thread to bring link up
2065 * @work: pointer to work_struct containing our data
2066 **/
2067static void ixgbevf_watchdog_task(struct work_struct *work)
2068{
2069 struct ixgbevf_adapter *adapter = container_of(work,
2070 struct ixgbevf_adapter,
2071 watchdog_task);
2072 struct net_device *netdev = adapter->netdev;
2073 struct ixgbe_hw *hw = &adapter->hw;
2074 u32 link_speed = adapter->link_speed;
2075 bool link_up = adapter->link_up;
2076
2077 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2078
2079 /*
2080 * Always check the link on the watchdog because we have
2081 * no LSC interrupt
2082 */
2083 if (hw->mac.ops.check_link) {
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002084 s32 need_reset;
2085
2086 spin_lock(&adapter->mbx_lock);
2087
2088 need_reset = hw->mac.ops.check_link(hw, &link_speed,
2089 &link_up, false);
2090
2091 spin_unlock(&adapter->mbx_lock);
2092
2093 if (need_reset) {
Greg Rose92915f72010-01-09 02:24:10 +00002094 adapter->link_up = link_up;
2095 adapter->link_speed = link_speed;
Greg Roseda6b3332010-01-22 22:47:37 +00002096 netif_carrier_off(netdev);
2097 netif_tx_stop_all_queues(netdev);
Greg Rose92915f72010-01-09 02:24:10 +00002098 schedule_work(&adapter->reset_task);
2099 goto pf_has_reset;
2100 }
2101 } else {
2102		/* always assume link is up if there is no check_link
2103		 * function */
2104 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2105 link_up = true;
2106 }
2107 adapter->link_up = link_up;
2108 adapter->link_speed = link_speed;
2109
2110 if (link_up) {
2111 if (!netif_carrier_ok(netdev)) {
Joe Perches300bc062010-03-22 20:08:04 -07002112 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2113 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2114 10 : 1);
Greg Rose92915f72010-01-09 02:24:10 +00002115 netif_carrier_on(netdev);
2116 netif_tx_wake_all_queues(netdev);
Greg Rose92915f72010-01-09 02:24:10 +00002117 }
2118 } else {
2119 adapter->link_up = false;
2120 adapter->link_speed = 0;
2121 if (netif_carrier_ok(netdev)) {
2122 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2123 netif_carrier_off(netdev);
2124 netif_tx_stop_all_queues(netdev);
2125 }
2126 }
2127
Greg Rose92915f72010-01-09 02:24:10 +00002128 ixgbevf_update_stats(adapter);
2129
Greg Rose33bd9f62010-03-19 02:59:52 +00002130pf_has_reset:
Greg Rose92915f72010-01-09 02:24:10 +00002131 /* Reset the timer */
2132 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2133 mod_timer(&adapter->watchdog_timer,
2134 round_jiffies(jiffies + (2 * HZ)));
2135
2136 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2137}
2138
2139/**
2140 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2141 * @adapter: board private structure
2142 * @tx_ring: Tx descriptor ring for a specific queue
2143 *
2144 * Free all transmit software resources
2145 **/
2146void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2147 struct ixgbevf_ring *tx_ring)
2148{
2149 struct pci_dev *pdev = adapter->pdev;
2150
Greg Rose92915f72010-01-09 02:24:10 +00002151 ixgbevf_clean_tx_ring(adapter, tx_ring);
2152
2153 vfree(tx_ring->tx_buffer_info);
2154 tx_ring->tx_buffer_info = NULL;
2155
Nick Nunley2a1f8792010-04-27 13:10:50 +00002156 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2157 tx_ring->dma);
Greg Rose92915f72010-01-09 02:24:10 +00002158
2159 tx_ring->desc = NULL;
2160}
2161
2162/**
2163 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2164 * @adapter: board private structure
2165 *
2166 * Free all transmit software resources
2167 **/
2168static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2169{
2170 int i;
2171
2172 for (i = 0; i < adapter->num_tx_queues; i++)
2173 if (adapter->tx_ring[i].desc)
2174 ixgbevf_free_tx_resources(adapter,
2175 &adapter->tx_ring[i]);
2176
2177}
2178
2179/**
2180 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2181 * @adapter: board private structure
2182 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2183 *
2184 * Return 0 on success, negative on failure
2185 **/
2186int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2187 struct ixgbevf_ring *tx_ring)
2188{
2189 struct pci_dev *pdev = adapter->pdev;
2190 int size;
2191
2192 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00002193 tx_ring->tx_buffer_info = vzalloc(size);
Greg Rose92915f72010-01-09 02:24:10 +00002194 if (!tx_ring->tx_buffer_info)
2195 goto err;
Greg Rose92915f72010-01-09 02:24:10 +00002196
2197 /* round up to nearest 4K */
2198 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2199 tx_ring->size = ALIGN(tx_ring->size, 4096);
2200
Nick Nunley2a1f8792010-04-27 13:10:50 +00002201 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2202 &tx_ring->dma, GFP_KERNEL);
Greg Rose92915f72010-01-09 02:24:10 +00002203 if (!tx_ring->desc)
2204 goto err;
2205
2206 tx_ring->next_to_use = 0;
2207 tx_ring->next_to_clean = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002208 return 0;
2209
2210err:
2211 vfree(tx_ring->tx_buffer_info);
2212 tx_ring->tx_buffer_info = NULL;
2213 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2214 "descriptor ring\n");
2215 return -ENOMEM;
2216}
2217
2218/**
2219 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2220 * @adapter: board private structure
2221 *
2222 * If this function returns with an error, then it's possible one or
2223 * more of the rings is populated (while the rest are not). It is the
2224	 * caller's duty to clean those orphaned rings.
2225 *
2226 * Return 0 on success, negative on failure
2227 **/
2228static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2229{
2230 int i, err = 0;
2231
2232 for (i = 0; i < adapter->num_tx_queues; i++) {
2233 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2234 if (!err)
2235 continue;
2236 hw_dbg(&adapter->hw,
2237 "Allocation for Tx Queue %u failed\n", i);
2238 break;
2239 }
2240
2241 return err;
2242}
2243
2244/**
2245 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2246 * @adapter: board private structure
2247 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2248 *
2249 * Returns 0 on success, negative on failure
2250 **/
2251int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2252 struct ixgbevf_ring *rx_ring)
2253{
2254 struct pci_dev *pdev = adapter->pdev;
2255 int size;
2256
2257 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00002258 rx_ring->rx_buffer_info = vzalloc(size);
Joe Perchese404dec2012-01-29 12:56:23 +00002259 if (!rx_ring->rx_buffer_info)
Greg Rose92915f72010-01-09 02:24:10 +00002260 goto alloc_failed;
Greg Rose92915f72010-01-09 02:24:10 +00002261
2262 /* Round up to nearest 4K */
2263 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2264 rx_ring->size = ALIGN(rx_ring->size, 4096);
2265
Nick Nunley2a1f8792010-04-27 13:10:50 +00002266 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2267 &rx_ring->dma, GFP_KERNEL);
Greg Rose92915f72010-01-09 02:24:10 +00002268
2269 if (!rx_ring->desc) {
2270 hw_dbg(&adapter->hw,
2271 "Unable to allocate memory for "
2272 "the receive descriptor ring\n");
2273 vfree(rx_ring->rx_buffer_info);
2274 rx_ring->rx_buffer_info = NULL;
2275 goto alloc_failed;
2276 }
2277
2278 rx_ring->next_to_clean = 0;
2279 rx_ring->next_to_use = 0;
2280
2281 return 0;
2282alloc_failed:
2283 return -ENOMEM;
2284}
2285
2286/**
2287 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2288 * @adapter: board private structure
2289 *
2290 * If this function returns with an error, then it's possible one or
2291 * more of the rings is populated (while the rest are not). It is the
2292	 * caller's duty to clean those orphaned rings.
2293 *
2294 * Return 0 on success, negative on failure
2295 **/
2296static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2297{
2298 int i, err = 0;
2299
2300 for (i = 0; i < adapter->num_rx_queues; i++) {
2301 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2302 if (!err)
2303 continue;
2304 hw_dbg(&adapter->hw,
2305 "Allocation for Rx Queue %u failed\n", i);
2306 break;
2307 }
2308 return err;
2309}
2310
2311/**
2312 * ixgbevf_free_rx_resources - Free Rx Resources
2313 * @adapter: board private structure
2314 * @rx_ring: ring to clean the resources from
2315 *
2316 * Free all receive software resources
2317 **/
2318void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2319 struct ixgbevf_ring *rx_ring)
2320{
2321 struct pci_dev *pdev = adapter->pdev;
2322
2323 ixgbevf_clean_rx_ring(adapter, rx_ring);
2324
2325 vfree(rx_ring->rx_buffer_info);
2326 rx_ring->rx_buffer_info = NULL;
2327
Nick Nunley2a1f8792010-04-27 13:10:50 +00002328 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2329 rx_ring->dma);
Greg Rose92915f72010-01-09 02:24:10 +00002330
2331 rx_ring->desc = NULL;
2332}
2333
2334/**
2335 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2336 * @adapter: board private structure
2337 *
2338 * Free all receive software resources
2339 **/
2340static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2341{
2342 int i;
2343
2344 for (i = 0; i < adapter->num_rx_queues; i++)
2345 if (adapter->rx_ring[i].desc)
2346 ixgbevf_free_rx_resources(adapter,
2347 &adapter->rx_ring[i]);
2348}
2349
2350/**
2351 * ixgbevf_open - Called when a network interface is made active
2352 * @netdev: network interface device structure
2353 *
2354 * Returns 0 on success, negative value on failure
2355 *
2356 * The open entry point is called when a network interface is made
2357 * active by the system (IFF_UP). At this point all resources needed
2358 * for transmit and receive operations are allocated, the interrupt
2359 * handler is registered with the OS, the watchdog timer is started,
2360 * and the stack is notified that the interface is ready.
2361 **/
2362static int ixgbevf_open(struct net_device *netdev)
2363{
2364 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2365 struct ixgbe_hw *hw = &adapter->hw;
2366 int err;
2367
2368 /* disallow open during test */
2369 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2370 return -EBUSY;
2371
2372 if (hw->adapter_stopped) {
2373 ixgbevf_reset(adapter);
2374 /* if adapter is still stopped then PF isn't up and
2375 * the vf can't start. */
2376 if (hw->adapter_stopped) {
2377 err = IXGBE_ERR_MBX;
Jeff Kirsherdbd96362011-10-21 19:38:18 +00002378 pr_err("Unable to start - perhaps the PF Driver isn't "
2379 "up yet\n");
Greg Rose92915f72010-01-09 02:24:10 +00002380 goto err_setup_reset;
2381 }
2382 }
2383
Alexander Duyck31186782012-07-20 08:09:58 +00002384 ixgbevf_negotiate_api(adapter);
2385
Greg Rose92915f72010-01-09 02:24:10 +00002386 /* allocate transmit descriptors */
2387 err = ixgbevf_setup_all_tx_resources(adapter);
2388 if (err)
2389 goto err_setup_tx;
2390
2391 /* allocate receive descriptors */
2392 err = ixgbevf_setup_all_rx_resources(adapter);
2393 if (err)
2394 goto err_setup_rx;
2395
2396 ixgbevf_configure(adapter);
2397
2398 /*
2399 * Map the Tx/Rx rings to the vectors we were allotted.
2400	 * If request_irq is going to be called in this function,
2401	 * map_rings must be called *before* up_complete.
2402 */
2403 ixgbevf_map_rings_to_vectors(adapter);
2404
Greg Rose795180d2012-04-17 04:29:34 +00002405 ixgbevf_up_complete(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002406
2407 /* clear any pending interrupts, may auto mask */
2408 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2409 err = ixgbevf_request_irq(adapter);
2410 if (err)
2411 goto err_req_irq;
2412
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002413 ixgbevf_irq_enable(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002414
2415 return 0;
2416
2417err_req_irq:
2418 ixgbevf_down(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002419 ixgbevf_free_irq(adapter);
2420err_setup_rx:
2421 ixgbevf_free_all_rx_resources(adapter);
2422err_setup_tx:
2423 ixgbevf_free_all_tx_resources(adapter);
2424 ixgbevf_reset(adapter);
2425
2426err_setup_reset:
2427
2428 return err;
2429}
2430
2431/**
2432 * ixgbevf_close - Disables a network interface
2433 * @netdev: network interface device structure
2434 *
2435 * Returns 0, this is not allowed to fail
2436 *
2437 * The close entry point is called when an interface is de-activated
2438 * by the OS. The hardware is still under the drivers control, but
2439 * needs to be disabled. A global MAC reset is issued to stop the
2440 * hardware, and all transmit and receive resources are freed.
2441 **/
2442static int ixgbevf_close(struct net_device *netdev)
2443{
2444 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2445
2446 ixgbevf_down(adapter);
2447 ixgbevf_free_irq(adapter);
2448
2449 ixgbevf_free_all_tx_resources(adapter);
2450 ixgbevf_free_all_rx_resources(adapter);
2451
2452 return 0;
2453}
2454
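/**
 * ixgbevf_tx_ctxtdesc - write an advanced Tx context descriptor
 * @tx_ring: ring to place the descriptor on
 * @vlan_macip_lens: VLAN tag plus MAC and IP header length fields
 * @type_tucmd: descriptor type and offload command bits
 * @mss_l4len_idx: MSS, L4 header length and context index fields
 *
 * Consumes the descriptor at next_to_use and fills it with the offload
 * context that the following data descriptors will refer to.
 **/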
Alexander Duyck70a10e22012-05-11 08:33:21 +00002455static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2456 u32 vlan_macip_lens, u32 type_tucmd,
2457 u32 mss_l4len_idx)
2458{
2459 struct ixgbe_adv_tx_context_desc *context_desc;
2460 u16 i = tx_ring->next_to_use;
2461
2462 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2463
2464 i++;
2465 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2466
2467 /* set bits to identify this as an advanced context descriptor */
2468 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2469
2470 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2471 context_desc->seqnum_seed = 0;
2472 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2473 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2474}
2475
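/**
 * ixgbevf_tso - prepare a TSO context descriptor
 * @tx_ring: ring the skb will be sent on
 * @skb: socket buffer to segment
 * @tx_flags: transmit flags carrying the VLAN information
 * @hdr_len: returns the header length covered by the offload
 *
 * Returns 0 when the skb does not need segmentation, a negative errno
 * if the cloned header could not be unshared, or 1 after seeding the
 * TCP pseudo-header checksum, programming MSS and header lengths and
 * writing the context descriptor (context index 1 is used for TSO).
 **/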
2476static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
Greg Rose92915f72010-01-09 02:24:10 +00002477 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2478{
Alexander Duyck70a10e22012-05-11 08:33:21 +00002479 u32 vlan_macip_lens, type_tucmd;
Greg Rose92915f72010-01-09 02:24:10 +00002480 u32 mss_l4len_idx, l4len;
2481
Alexander Duyck70a10e22012-05-11 08:33:21 +00002482 if (!skb_is_gso(skb))
2483 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00002484
Alexander Duyck70a10e22012-05-11 08:33:21 +00002485 if (skb_header_cloned(skb)) {
2486 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2487 if (err)
2488 return err;
Greg Rose92915f72010-01-09 02:24:10 +00002489 }
2490
Alexander Duyck70a10e22012-05-11 08:33:21 +00002491 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2492 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2493
2494 if (skb->protocol == htons(ETH_P_IP)) {
2495 struct iphdr *iph = ip_hdr(skb);
2496 iph->tot_len = 0;
2497 iph->check = 0;
2498 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2499 iph->daddr, 0,
2500 IPPROTO_TCP,
2501 0);
2502 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2503 } else if (skb_is_gso_v6(skb)) {
2504 ipv6_hdr(skb)->payload_len = 0;
2505 tcp_hdr(skb)->check =
2506 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2507 &ipv6_hdr(skb)->daddr,
2508 0, IPPROTO_TCP, 0);
2509 }
2510
2511 /* compute header lengths */
2512 l4len = tcp_hdrlen(skb);
2514 *hdr_len = skb_transport_offset(skb) + l4len;
2515
2516 /* mss_l4len_id: use 1 as index for TSO */
2517 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2518 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2519 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2520
2521 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2522 vlan_macip_lens = skb_network_header_len(skb);
2523 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2524 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2525
2526 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2527 type_tucmd, mss_l4len_idx);
2528
2529 return 1;
Greg Rose92915f72010-01-09 02:24:10 +00002530}
2531
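/**
 * ixgbevf_tx_csum - prepare Tx checksum offload
 * @tx_ring: ring the skb will be sent on
 * @skb: socket buffer to offload
 * @tx_flags: transmit flags carrying the VLAN information
 *
 * For CHECKSUM_PARTIAL skbs the L3/L4 protocol fields of the context
 * descriptor are derived (TCP, SCTP or UDP over IPv4/IPv6); a context
 * descriptor carrying at least the VLAN and MAC length fields is
 * written in every case.  Returns true when the hardware should insert
 * the checksum.
 **/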
Alexander Duyck70a10e22012-05-11 08:33:21 +00002532static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
Greg Rose92915f72010-01-09 02:24:10 +00002533 struct sk_buff *skb, u32 tx_flags)
2534{
Alexander Duyck70a10e22012-05-11 08:33:21 +00002538 u32 vlan_macip_lens = 0;
2539 u32 mss_l4len_idx = 0;
2540 u32 type_tucmd = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002541
Alexander Duyck70a10e22012-05-11 08:33:21 +00002542 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2543 u8 l4_hdr = 0;
2544 switch (skb->protocol) {
2545 case __constant_htons(ETH_P_IP):
2546 vlan_macip_lens |= skb_network_header_len(skb);
2547 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2548 l4_hdr = ip_hdr(skb)->protocol;
2549 break;
2550 case __constant_htons(ETH_P_IPV6):
2551 vlan_macip_lens |= skb_network_header_len(skb);
2552 l4_hdr = ipv6_hdr(skb)->nexthdr;
2553 break;
2554 default:
2555 if (unlikely(net_ratelimit())) {
2556 dev_warn(tx_ring->dev,
2557 "partial checksum but proto=%x!\n",
2558 skb->protocol);
Greg Rose92915f72010-01-09 02:24:10 +00002559 }
Alexander Duyck70a10e22012-05-11 08:33:21 +00002560 break;
Greg Rose92915f72010-01-09 02:24:10 +00002561 }
2562
Alexander Duyck70a10e22012-05-11 08:33:21 +00002563 switch (l4_hdr) {
2564 case IPPROTO_TCP:
2565 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2566 mss_l4len_idx = tcp_hdrlen(skb) <<
2567 IXGBE_ADVTXD_L4LEN_SHIFT;
2568 break;
2569 case IPPROTO_SCTP:
2570 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2571 mss_l4len_idx = sizeof(struct sctphdr) <<
2572 IXGBE_ADVTXD_L4LEN_SHIFT;
2573 break;
2574 case IPPROTO_UDP:
2575 mss_l4len_idx = sizeof(struct udphdr) <<
2576 IXGBE_ADVTXD_L4LEN_SHIFT;
2577 break;
2578 default:
2579 if (unlikely(net_ratelimit())) {
2580 dev_warn(tx_ring->dev,
2581 "partial checksum but l4 proto=%x!\n",
2582 l4_hdr);
2583 }
2584 break;
2585 }
Greg Rose92915f72010-01-09 02:24:10 +00002586 }
2587
Alexander Duyck70a10e22012-05-11 08:33:21 +00002588 /* vlan_macip_lens: MACLEN, VLAN tag */
2589 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2590 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2591
2592 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2593 type_tucmd, mss_l4len_idx);
2594
2595 return (skb->ip_summed == CHECKSUM_PARTIAL);
Greg Rose92915f72010-01-09 02:24:10 +00002596}
2597
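/**
 * ixgbevf_tx_map - map skb data for transmission
 * @tx_ring: ring the skb will be sent on
 * @skb: socket buffer to map
 * @tx_flags: transmit flags
 * @first: index of the first descriptor used by this packet
 *
 * DMA-maps the linear area and every page fragment of the skb in
 * chunks of at most IXGBE_MAX_DATA_PER_TXD bytes, recording the length,
 * mapping and next_to_watch index in the ring's tx_buffer_info array.
 * Returns the number of buffers mapped; on a DMA mapping error the
 * mappings made so far are unwound.
 **/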
Alexander Duyck70a10e22012-05-11 08:33:21 +00002598static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
Greg Rose92915f72010-01-09 02:24:10 +00002599 struct sk_buff *skb, u32 tx_flags,
2600 unsigned int first)
2601{
Greg Rose92915f72010-01-09 02:24:10 +00002602 struct ixgbevf_tx_buffer *tx_buffer_info;
2603 unsigned int len;
2604 unsigned int total = skb->len;
Kulikov Vasiliy2540ddb2010-07-15 08:45:57 +00002605 unsigned int offset = 0, size;
2606 int count = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002607 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2608 unsigned int f;
Greg Rose65deeed2010-03-24 09:35:42 +00002609 int i;
Greg Rose92915f72010-01-09 02:24:10 +00002610
2611 i = tx_ring->next_to_use;
2612
2613 len = min(skb_headlen(skb), total);
2614 while (len) {
2615 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2616 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2617
2618 tx_buffer_info->length = size;
2619 tx_buffer_info->mapped_as_page = false;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002620 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
Greg Rose92915f72010-01-09 02:24:10 +00002621 skb->data + offset,
Nick Nunley2a1f8792010-04-27 13:10:50 +00002622 size, DMA_TO_DEVICE);
Alexander Duyck70a10e22012-05-11 08:33:21 +00002623 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
Greg Rose92915f72010-01-09 02:24:10 +00002624 goto dma_error;
Greg Rose92915f72010-01-09 02:24:10 +00002625 tx_buffer_info->next_to_watch = i;
2626
2627 len -= size;
2628 total -= size;
2629 offset += size;
2630 count++;
2631 i++;
2632 if (i == tx_ring->count)
2633 i = 0;
2634 }
2635
2636 for (f = 0; f < nr_frags; f++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00002637 const struct skb_frag_struct *frag;
Greg Rose92915f72010-01-09 02:24:10 +00002638
2639 frag = &skb_shinfo(skb)->frags[f];
Eric Dumazet9e903e02011-10-18 21:00:24 +00002640 len = min((unsigned int)skb_frag_size(frag), total);
Ian Campbell877749b2011-08-29 23:18:26 +00002641 offset = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002642
2643 while (len) {
2644 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2645 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2646
2647 tx_buffer_info->length = size;
Ian Campbell877749b2011-08-29 23:18:26 +00002648 tx_buffer_info->dma =
Alexander Duyck70a10e22012-05-11 08:33:21 +00002649 skb_frag_dma_map(tx_ring->dev, frag,
Ian Campbell877749b2011-08-29 23:18:26 +00002650 offset, size, DMA_TO_DEVICE);
Greg Rose92915f72010-01-09 02:24:10 +00002651 tx_buffer_info->mapped_as_page = true;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002652 if (dma_mapping_error(tx_ring->dev,
2653 tx_buffer_info->dma))
Greg Rose92915f72010-01-09 02:24:10 +00002654 goto dma_error;
Greg Rose92915f72010-01-09 02:24:10 +00002655 tx_buffer_info->next_to_watch = i;
2656
2657 len -= size;
2658 total -= size;
2659 offset += size;
2660 count++;
2661 i++;
2662 if (i == tx_ring->count)
2663 i = 0;
2664 }
2665 if (total == 0)
2666 break;
2667 }
2668
2669 if (i == 0)
2670 i = tx_ring->count - 1;
2671 else
2672 i = i - 1;
2673 tx_ring->tx_buffer_info[i].skb = skb;
2674 tx_ring->tx_buffer_info[first].next_to_watch = i;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002675 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
Greg Rose92915f72010-01-09 02:24:10 +00002676
2677 return count;
2678
2679dma_error:
Alexander Duyck70a10e22012-05-11 08:33:21 +00002680 dev_err(tx_ring->dev, "TX DMA map failed\n");
Greg Rose92915f72010-01-09 02:24:10 +00002681
2682 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2683 tx_buffer_info->dma = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002684 tx_buffer_info->next_to_watch = 0;
2685 count--;
2686
2687 /* clear timestamp and dma mappings for remaining portion of packet */
2688 while (count >= 0) {
2689 count--;
2690 i--;
2691 if (i < 0)
2692 i += tx_ring->count;
2693 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00002694 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00002695 }
2696
2697 return count;
2698}
2699
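/**
 * ixgbevf_tx_queue - write the data descriptors for a packet
 * @tx_ring: ring the skb is placed on
 * @tx_flags: transmit flags (VLAN/CSUM/TSO/IPV4)
 * @count: number of previously mapped buffers to describe
 * @paylen: total packet length
 * @hdr_len: header length covered by TSO
 *
 * Builds the command and offload status words from the flags and walks
 * the mapped tx_buffer_info entries, writing one advanced data
 * descriptor per buffer; the final descriptor carries the EOP and RS
 * bits so the hardware reports completion.
 **/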
Alexander Duyck70a10e22012-05-11 08:33:21 +00002700static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
Greg Rose92915f72010-01-09 02:24:10 +00002701 int count, u32 paylen, u8 hdr_len)
2702{
2703 union ixgbe_adv_tx_desc *tx_desc = NULL;
2704 struct ixgbevf_tx_buffer *tx_buffer_info;
2705 u32 olinfo_status = 0, cmd_type_len = 0;
2706 unsigned int i;
2707
2708 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2709
2710 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2711
2712 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2713
2714 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2715 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2716
Alexander Duyck70a10e22012-05-11 08:33:21 +00002717 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2718 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2719
Greg Rose92915f72010-01-09 02:24:10 +00002720 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2721 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2722
Greg Rose92915f72010-01-09 02:24:10 +00002723 /* use index 1 context for tso */
2724 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2725 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
Alexander Duyck70a10e22012-05-11 08:33:21 +00002726 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
Greg Rose92915f72010-01-09 02:24:10 +00002727
Alexander Duyck70a10e22012-05-11 08:33:21 +00002728 }
2729
2730 /*
2731 * Check Context must be set if Tx switch is enabled, which it
2732 * always is for case where virtual functions are running
2733 */
2734 olinfo_status |= IXGBE_ADVTXD_CC;
Greg Rose92915f72010-01-09 02:24:10 +00002735
2736 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2737
2738 i = tx_ring->next_to_use;
2739 while (count--) {
2740 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck908421f2012-05-11 08:33:00 +00002741 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
Greg Rose92915f72010-01-09 02:24:10 +00002742 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2743 tx_desc->read.cmd_type_len =
2744 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2745 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2746 i++;
2747 if (i == tx_ring->count)
2748 i = 0;
2749 }
2750
2751 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2752
Greg Rose92915f72010-01-09 02:24:10 +00002753 tx_ring->next_to_use = i;
Greg Rose92915f72010-01-09 02:24:10 +00002754}
2755
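/*
 * __ixgbevf_maybe_stop_tx - slow path of ixgbevf_maybe_stop_tx(), taken
 * only when fewer than 'size' descriptors are free.  The queue is
 * stopped first and the free count re-checked after a memory barrier in
 * case the cleanup path freed descriptors concurrently; if it did, the
 * queue is restarted immediately.
 */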
Alexander Duyckfb401952012-05-11 08:33:16 +00002756static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
Greg Rose92915f72010-01-09 02:24:10 +00002757{
Alexander Duyckfb401952012-05-11 08:33:16 +00002758 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
Greg Rose92915f72010-01-09 02:24:10 +00002759
Alexander Duyckfb401952012-05-11 08:33:16 +00002760 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
Greg Rose92915f72010-01-09 02:24:10 +00002761 /* Herbert's original patch had:
2762 * smp_mb__after_netif_stop_queue();
2763 * but since that doesn't exist yet, just open code it. */
2764 smp_mb();
2765
2766	/* We need to check again in case another CPU has just
2767	 * made room available. */
2768 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
2769 return -EBUSY;
2770
2771 /* A reprieve! - use start_queue because it doesn't call schedule */
Alexander Duyckfb401952012-05-11 08:33:16 +00002772 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
Greg Rose92915f72010-01-09 02:24:10 +00002773 ++adapter->restart_queue;
2774 return 0;
2775}
2776
Alexander Duyckfb401952012-05-11 08:33:16 +00002777static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
Greg Rose92915f72010-01-09 02:24:10 +00002778{
2779 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
2780 return 0;
Alexander Duyckfb401952012-05-11 08:33:16 +00002781 return __ixgbevf_maybe_stop_tx(tx_ring, size);
Greg Rose92915f72010-01-09 02:24:10 +00002782}
2783
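/**
 * ixgbevf_xmit_frame - transmit entry point
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Reserves enough descriptors for the worst case (data, context and gap
 * descriptors), builds the TSO or checksum context as required,
 * DMA-maps the buffers, writes the data descriptors and finally bumps
 * the ring tail register to hand the packet to hardware.
 **/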
2784static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2785{
2786 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2787 struct ixgbevf_ring *tx_ring;
2788 unsigned int first;
2789 unsigned int tx_flags = 0;
2790 u8 hdr_len = 0;
2791 int r_idx = 0, tso;
Alexander Duyck35959902012-05-11 08:32:40 +00002792 u16 count = TXD_USE_COUNT(skb_headlen(skb));
2793#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2794 unsigned short f;
2795#endif
Greg Rose92915f72010-01-09 02:24:10 +00002796
2797 tx_ring = &adapter->tx_ring[r_idx];
2798
Alexander Duyck35959902012-05-11 08:32:40 +00002799 /*
2800 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
2801 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
2802 * + 2 desc gap to keep tail from touching head,
2803 * + 1 desc for context descriptor,
2804 * otherwise try next time
2805 */
2806#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2807 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2808 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2809#else
2810 count += skb_shinfo(skb)->nr_frags;
2811#endif
Alexander Duyckfb401952012-05-11 08:33:16 +00002812 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
Alexander Duyck35959902012-05-11 08:32:40 +00002813 adapter->tx_busy++;
2814 return NETDEV_TX_BUSY;
2815 }
2816
Jesse Grosseab6d182010-10-20 13:56:03 +00002817 if (vlan_tx_tag_present(skb)) {
Greg Rose92915f72010-01-09 02:24:10 +00002818 tx_flags |= vlan_tx_tag_get(skb);
2819 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
2820 tx_flags |= IXGBE_TX_FLAGS_VLAN;
2821 }
2822
Greg Rose92915f72010-01-09 02:24:10 +00002823 first = tx_ring->next_to_use;
2824
2825 if (skb->protocol == htons(ETH_P_IP))
2826 tx_flags |= IXGBE_TX_FLAGS_IPV4;
Alexander Duyck70a10e22012-05-11 08:33:21 +00002827 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
Greg Rose92915f72010-01-09 02:24:10 +00002828 if (tso < 0) {
2829 dev_kfree_skb_any(skb);
2830 return NETDEV_TX_OK;
2831 }
2832
2833 if (tso)
Alexander Duyck70a10e22012-05-11 08:33:21 +00002834 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
2835 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
Greg Rose92915f72010-01-09 02:24:10 +00002836 tx_flags |= IXGBE_TX_FLAGS_CSUM;
2837
Alexander Duyck70a10e22012-05-11 08:33:21 +00002838 ixgbevf_tx_queue(tx_ring, tx_flags,
2839 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
Greg Rose92915f72010-01-09 02:24:10 +00002840 skb->len, hdr_len);
Alexander Duyck70a10e22012-05-11 08:33:21 +00002841 /*
2842 * Force memory writes to complete before letting h/w
2843 * know there are new descriptors to fetch. (Only
2844 * applicable for weak-ordered memory model archs,
2845 * such as IA-64).
2846 */
2847 wmb();
2848
2849 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
Greg Rose92915f72010-01-09 02:24:10 +00002850
Alexander Duyckfb401952012-05-11 08:33:16 +00002851 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
Greg Rose92915f72010-01-09 02:24:10 +00002852
2853 return NETDEV_TX_OK;
2854}
2855
2856/**
Greg Rose92915f72010-01-09 02:24:10 +00002857 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
2858 * @netdev: network interface device structure
2859 * @p: pointer to an address structure
2860 *
2861 * Returns 0 on success, negative on failure
2862 **/
2863static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2864{
2865 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2866 struct ixgbe_hw *hw = &adapter->hw;
2867 struct sockaddr *addr = p;
2868
2869 if (!is_valid_ether_addr(addr->sa_data))
2870 return -EADDRNOTAVAIL;
2871
2872 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2873 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2874
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002875 spin_lock(&adapter->mbx_lock);
2876
Greg Rose92915f72010-01-09 02:24:10 +00002877 if (hw->mac.ops.set_rar)
2878 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2879
Alexander Duyck1c55ed72012-05-11 08:33:06 +00002880 spin_unlock(&adapter->mbx_lock);
2881
Greg Rose92915f72010-01-09 02:24:10 +00002882 return 0;
2883}
2884
2885/**
2886 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
2887 * @netdev: network interface device structure
2888 * @new_mtu: new value for maximum frame size
2889 *
2890 * Returns 0 on success, negative on failure
2891 **/
2892static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2893{
2894 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2895 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Greg Rose69bfbec2011-01-26 01:06:12 +00002896 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
Greg Rose69bfbec2011-01-26 01:06:12 +00002897
2898 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
2899 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
Greg Rose92915f72010-01-09 02:24:10 +00002900
2901 /* MTU < 68 is an error and causes problems on some kernels */
Greg Rose69bfbec2011-01-26 01:06:12 +00002902 if ((new_mtu < 68) || (max_frame > max_possible_frame))
Greg Rose92915f72010-01-09 02:24:10 +00002903 return -EINVAL;
2904
2905 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
2906 netdev->mtu, new_mtu);
2907 /* must set new MTU before calling down or up */
2908 netdev->mtu = new_mtu;
2909
2910 if (netif_running(netdev))
2911 ixgbevf_reinit_locked(adapter);
2912
2913 return 0;
2914}
2915
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002916static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
Greg Rose92915f72010-01-09 02:24:10 +00002917{
2918 struct net_device *netdev = pci_get_drvdata(pdev);
2919 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002920#ifdef CONFIG_PM
2921 int retval = 0;
2922#endif
Greg Rose92915f72010-01-09 02:24:10 +00002923
2924 netif_device_detach(netdev);
2925
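	/*
	 * Tear down the data path under RTNL so it cannot race with a
	 * concurrent open/close or MTU change.
	 */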
2926 if (netif_running(netdev)) {
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002927 rtnl_lock();
Greg Rose92915f72010-01-09 02:24:10 +00002928 ixgbevf_down(adapter);
2929 ixgbevf_free_irq(adapter);
2930 ixgbevf_free_all_tx_resources(adapter);
2931 ixgbevf_free_all_rx_resources(adapter);
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002932 rtnl_unlock();
Greg Rose92915f72010-01-09 02:24:10 +00002933 }
2934
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002935 ixgbevf_clear_interrupt_scheme(adapter);
2936
2937#ifdef CONFIG_PM
2938 retval = pci_save_state(pdev);
2939 if (retval)
2940 return retval;
2941
2942#endif
2943 pci_disable_device(pdev);
2944
2945 return 0;
2946}
2947
2948#ifdef CONFIG_PM
2949static int ixgbevf_resume(struct pci_dev *pdev)
2950{
2951 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
2952 struct net_device *netdev = adapter->netdev;
2953 u32 err;
2954
2955 pci_set_power_state(pdev, PCI_D0);
2956 pci_restore_state(pdev);
2957 /*
2958 * pci_restore_state clears dev->state_saved so call
2959 * pci_save_state to restore it.
2960 */
Greg Rose92915f72010-01-09 02:24:10 +00002961 pci_save_state(pdev);
Greg Rose92915f72010-01-09 02:24:10 +00002962
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002963 err = pci_enable_device_mem(pdev);
2964 if (err) {
2965 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2966 return err;
2967 }
2968 pci_set_master(pdev);
2969
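	/*
	 * Re-create the interrupt/queue layout that was released by
	 * ixgbevf_clear_interrupt_scheme() during suspend.
	 */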
2970 rtnl_lock();
2971 err = ixgbevf_init_interrupt_scheme(adapter);
2972 rtnl_unlock();
2973 if (err) {
2974 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
2975 return err;
2976 }
2977
2978 ixgbevf_reset(adapter);
2979
2980 if (netif_running(netdev)) {
2981 err = ixgbevf_open(netdev);
2982 if (err)
2983 return err;
2984 }
2985
2986 netif_device_attach(netdev);
2987
2988 return err;
2989}
2990
2991#endif /* CONFIG_PM */
2992static void ixgbevf_shutdown(struct pci_dev *pdev)
2993{
2994 ixgbevf_suspend(pdev, PMSG_SUSPEND);
Greg Rose92915f72010-01-09 02:24:10 +00002995}
2996
Eric Dumazet4197aa72011-06-22 05:01:35 +00002997static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
2998 struct rtnl_link_stats64 *stats)
2999{
3000 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3001 unsigned int start;
3002 u64 bytes, packets;
3003 const struct ixgbevf_ring *ring;
3004 int i;
3005
3006 ixgbevf_update_stats(adapter);
3007
3008 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3009
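	/*
	 * Per-ring byte/packet counters are read under a u64_stats
	 * seqcount retry loop so 32-bit readers see a consistent
	 * 64-bit snapshot.
	 */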
3010 for (i = 0; i < adapter->num_rx_queues; i++) {
3011 ring = &adapter->rx_ring[i];
3012 do {
3013 start = u64_stats_fetch_begin_bh(&ring->syncp);
3014 bytes = ring->total_bytes;
3015 packets = ring->total_packets;
3016 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3017 stats->rx_bytes += bytes;
3018 stats->rx_packets += packets;
3019 }
3020
3021 for (i = 0; i < adapter->num_tx_queues; i++) {
3022 ring = &adapter->tx_ring[i];
3023 do {
3024 start = u64_stats_fetch_begin_bh(&ring->syncp);
3025 bytes = ring->total_bytes;
3026 packets = ring->total_packets;
3027 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3028 stats->tx_bytes += bytes;
3029 stats->tx_packets += packets;
3030 }
3031
3032 return stats;
3033}
3034
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003035static const struct net_device_ops ixgbevf_netdev_ops = {
Stephen Hemmingerc12db762011-06-09 02:58:39 +00003036 .ndo_open = ixgbevf_open,
3037 .ndo_stop = ixgbevf_close,
3038 .ndo_start_xmit = ixgbevf_xmit_frame,
3039 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
Eric Dumazet4197aa72011-06-22 05:01:35 +00003040 .ndo_get_stats64 = ixgbevf_get_stats,
Greg Rose92915f72010-01-09 02:24:10 +00003041 .ndo_validate_addr = eth_validate_addr,
Stephen Hemmingerc12db762011-06-09 02:58:39 +00003042 .ndo_set_mac_address = ixgbevf_set_mac,
3043 .ndo_change_mtu = ixgbevf_change_mtu,
3044 .ndo_tx_timeout = ixgbevf_tx_timeout,
Stephen Hemmingerc12db762011-06-09 02:58:39 +00003045 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3046 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
Greg Rose92915f72010-01-09 02:24:10 +00003047};
Greg Rose92915f72010-01-09 02:24:10 +00003048
3049static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3050{
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003051 dev->netdev_ops = &ixgbevf_netdev_ops;
Greg Rose92915f72010-01-09 02:24:10 +00003052 ixgbevf_set_ethtool_ops(dev);
3053 dev->watchdog_timeo = 5 * HZ;
3054}
3055
3056/**
3057 * ixgbevf_probe - Device Initialization Routine
3058 * @pdev: PCI device information struct
3059 * @ent: entry in ixgbevf_pci_tbl
3060 *
3061 * Returns 0 on success, negative on failure
3062 *
3063 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3064 * The OS initialization, configuring of the adapter private structure,
3065 * and a hardware reset occur.
3066 **/
3067static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3068 const struct pci_device_id *ent)
3069{
3070 struct net_device *netdev;
3071 struct ixgbevf_adapter *adapter = NULL;
3072 struct ixgbe_hw *hw = NULL;
3073 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3074 static int cards_found;
3075 int err, pci_using_dac;
3076
3077 err = pci_enable_device(pdev);
3078 if (err)
3079 return err;
3080
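	/*
	 * Prefer 64-bit DMA and fall back to 32-bit masks if the platform
	 * cannot support it; pci_using_dac records whether NETIF_F_HIGHDMA
	 * can be advertised later.
	 */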
Nick Nunley2a1f8792010-04-27 13:10:50 +00003081 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3082 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
Greg Rose92915f72010-01-09 02:24:10 +00003083 pci_using_dac = 1;
3084 } else {
Nick Nunley2a1f8792010-04-27 13:10:50 +00003085 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Greg Rose92915f72010-01-09 02:24:10 +00003086 if (err) {
Nick Nunley2a1f8792010-04-27 13:10:50 +00003087 err = dma_set_coherent_mask(&pdev->dev,
3088 DMA_BIT_MASK(32));
Greg Rose92915f72010-01-09 02:24:10 +00003089 if (err) {
3090 dev_err(&pdev->dev, "No usable DMA "
3091 "configuration, aborting\n");
3092 goto err_dma;
3093 }
3094 }
3095 pci_using_dac = 0;
3096 }
3097
3098 err = pci_request_regions(pdev, ixgbevf_driver_name);
3099 if (err) {
3100 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3101 goto err_pci_reg;
3102 }
3103
3104 pci_set_master(pdev);
3105
Greg Rose92915f72010-01-09 02:24:10 +00003106 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3107 MAX_TX_QUEUES);
Greg Rose92915f72010-01-09 02:24:10 +00003108 if (!netdev) {
3109 err = -ENOMEM;
3110 goto err_alloc_etherdev;
3111 }
3112
3113 SET_NETDEV_DEV(netdev, &pdev->dev);
3114
3115 pci_set_drvdata(pdev, netdev);
3116 adapter = netdev_priv(netdev);
3117
3118 adapter->netdev = netdev;
3119 adapter->pdev = pdev;
3120 hw = &adapter->hw;
3121 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00003122 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Greg Rose92915f72010-01-09 02:24:10 +00003123
3124 /*
3125 * call save state here in standalone driver because it relies on
3126 * adapter struct to exist, and needs to call netdev_priv
3127 */
3128 pci_save_state(pdev);
3129
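	/* map BAR 0, which holds the VF register space */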
3130 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3131 pci_resource_len(pdev, 0));
3132 if (!hw->hw_addr) {
3133 err = -EIO;
3134 goto err_ioremap;
3135 }
3136
3137 ixgbevf_assign_netdev_ops(netdev);
3138
3139 adapter->bd_number = cards_found;
3140
3141 /* Setup hw api */
3142 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3143 hw->mac.type = ii->mac;
3144
3145 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
Greg Rosef416dfc2011-06-08 07:32:38 +00003146 sizeof(struct ixgbe_mbx_operations));
Greg Rose92915f72010-01-09 02:24:10 +00003147
Greg Rose92915f72010-01-09 02:24:10 +00003148 /* setup the private structure */
3149 err = ixgbevf_sw_init(adapter);
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00003150 if (err)
3151 goto err_sw_init;
3152
3153 /* The HW MAC address was set and/or determined in sw_init */
3154 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3155
3156 if (!is_valid_ether_addr(netdev->dev_addr)) {
3157 pr_err("invalid MAC address\n");
3158 err = -EIO;
3159 goto err_sw_init;
3160 }
Greg Rose92915f72010-01-09 02:24:10 +00003161
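	/*
	 * Offloads listed in hw_features can be toggled by the user via
	 * ethtool; the VLAN offloads added only to netdev->features below
	 * remain enabled.
	 */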
Michał Mirosław471a76d2011-06-08 08:53:03 +00003162 netdev->hw_features = NETIF_F_SG |
Greg Rose92915f72010-01-09 02:24:10 +00003163 NETIF_F_IP_CSUM |
Michał Mirosław471a76d2011-06-08 08:53:03 +00003164 NETIF_F_IPV6_CSUM |
3165 NETIF_F_TSO |
3166 NETIF_F_TSO6 |
3167 NETIF_F_RXCSUM;
3168
3169 netdev->features = netdev->hw_features |
Greg Rose92915f72010-01-09 02:24:10 +00003170 NETIF_F_HW_VLAN_TX |
3171 NETIF_F_HW_VLAN_RX |
3172 NETIF_F_HW_VLAN_FILTER;
3173
Greg Rose92915f72010-01-09 02:24:10 +00003174 netdev->vlan_features |= NETIF_F_TSO;
3175 netdev->vlan_features |= NETIF_F_TSO6;
3176 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyck3bfacf92010-08-02 14:59:04 +00003177 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Greg Rose92915f72010-01-09 02:24:10 +00003178 netdev->vlan_features |= NETIF_F_SG;
3179
3180 if (pci_using_dac)
3181 netdev->features |= NETIF_F_HIGHDMA;
3182
Jiri Pirko01789342011-08-16 06:29:00 +00003183 netdev->priv_flags |= IFF_UNICAST_FLT;
3184
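	/*
	 * Set up the watchdog timer and the deferred work items used for
	 * reset handling and periodic link/statistics maintenance.
	 */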
Greg Rose92915f72010-01-09 02:24:10 +00003185 init_timer(&adapter->watchdog_timer);
Joe Perchesc061b182010-08-23 18:20:03 +00003186 adapter->watchdog_timer.function = ixgbevf_watchdog;
Greg Rose92915f72010-01-09 02:24:10 +00003187 adapter->watchdog_timer.data = (unsigned long)adapter;
3188
3189 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3190 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3191
3192 err = ixgbevf_init_interrupt_scheme(adapter);
3193 if (err)
3194 goto err_sw_init;
3195
3196 /* pick up the PCI bus settings for reporting later */
3197 if (hw->mac.ops.get_bus_info)
3198 hw->mac.ops.get_bus_info(hw);
3199
Greg Rose92915f72010-01-09 02:24:10 +00003200 strcpy(netdev->name, "eth%d");
3201
3202 err = register_netdev(netdev);
3203 if (err)
3204 goto err_register;
3205
Greg Rose5d426ad2010-11-16 19:27:19 -08003206 netif_carrier_off(netdev);
3207
Greg Rose33bd9f62010-03-19 02:59:52 +00003208 ixgbevf_init_last_counter_stats(adapter);
3209
Greg Rose92915f72010-01-09 02:24:10 +00003210 /* print the MAC address */
Danny Kukawkaf794e7e2012-02-24 03:45:56 +00003211 hw_dbg(hw, "%pM\n", netdev->dev_addr);
Greg Rose92915f72010-01-09 02:24:10 +00003212
3213 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3214
Greg Rose92915f72010-01-09 02:24:10 +00003215 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3216 cards_found++;
3217 return 0;
3218
3219err_register:
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003220 ixgbevf_clear_interrupt_scheme(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00003221err_sw_init:
3222 ixgbevf_reset_interrupt_capability(adapter);
3223 iounmap(hw->hw_addr);
3224err_ioremap:
3225 free_netdev(netdev);
3226err_alloc_etherdev:
3227 pci_release_regions(pdev);
3228err_pci_reg:
3229err_dma:
3230 pci_disable_device(pdev);
3231 return err;
3232}
3233
3234/**
3235 * ixgbevf_remove - Device Removal Routine
3236 * @pdev: PCI device information struct
3237 *
3238 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 3239 * that it should release a PCI device. This could be caused by a
3240 * Hot-Plug event, or because the driver is going to be removed from
3241 * memory.
3242 **/
3243static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3244{
3245 struct net_device *netdev = pci_get_drvdata(pdev);
3246 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3247
3248 set_bit(__IXGBEVF_DOWN, &adapter->state);
3249
3250 del_timer_sync(&adapter->watchdog_timer);
3251
Tejun Heo23f333a2010-12-12 16:45:14 +01003252 cancel_work_sync(&adapter->reset_task);
Greg Rose92915f72010-01-09 02:24:10 +00003253 cancel_work_sync(&adapter->watchdog_task);
3254
Alexander Duyckfd13a9a2012-05-11 08:32:24 +00003255 if (netdev->reg_state == NETREG_REGISTERED)
Greg Rose92915f72010-01-09 02:24:10 +00003256 unregister_netdev(netdev);
Greg Rose92915f72010-01-09 02:24:10 +00003257
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003258 ixgbevf_clear_interrupt_scheme(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00003259 ixgbevf_reset_interrupt_capability(adapter);
3260
3261 iounmap(adapter->hw.hw_addr);
3262 pci_release_regions(pdev);
3263
3264 hw_dbg(&adapter->hw, "Remove complete\n");
3265
3266 kfree(adapter->tx_ring);
3267 kfree(adapter->rx_ring);
3268
3269 free_netdev(netdev);
3270
3271 pci_disable_device(pdev);
3272}
3273
Alexander Duyck9f19f312012-05-11 08:33:32 +00003274/**
3275 * ixgbevf_io_error_detected - called when PCI error is detected
3276 * @pdev: Pointer to PCI device
3277 * @state: The current pci connection state
3278 *
3279 * This function is called after a PCI bus error affecting
3280 * this device has been detected.
3281 */
3282static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3283 pci_channel_state_t state)
3284{
3285 struct net_device *netdev = pci_get_drvdata(pdev);
3286 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3287
3288 netif_device_detach(netdev);
3289
3290 if (state == pci_channel_io_perm_failure)
3291 return PCI_ERS_RESULT_DISCONNECT;
3292
3293 if (netif_running(netdev))
3294 ixgbevf_down(adapter);
3295
3296 pci_disable_device(pdev);
3297
 3298 /* Request a slot reset. */
3299 return PCI_ERS_RESULT_NEED_RESET;
3300}
3301
3302/**
3303 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3304 * @pdev: Pointer to PCI device
3305 *
3306 * Restart the card from scratch, as if from a cold-boot. Implementation
3307 * resembles the first-half of the ixgbevf_resume routine.
3308 */
3309static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3310{
3311 struct net_device *netdev = pci_get_drvdata(pdev);
3312 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3313
3314 if (pci_enable_device_mem(pdev)) {
3315 dev_err(&pdev->dev,
3316 "Cannot re-enable PCI device after reset.\n");
3317 return PCI_ERS_RESULT_DISCONNECT;
3318 }
3319
3320 pci_set_master(pdev);
3321
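	/*
	 * Bring the VF back to a known hardware state; traffic is
	 * restarted later by ixgbevf_io_resume().
	 */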
3322 ixgbevf_reset(adapter);
3323
3324 return PCI_ERS_RESULT_RECOVERED;
3325}
3326
3327/**
3328 * ixgbevf_io_resume - called when traffic can start flowing again.
3329 * @pdev: Pointer to PCI device
3330 *
3331 * This callback is called when the error recovery driver tells us that
 3332 * it's OK to resume normal operation. Implementation resembles the
3333 * second-half of the ixgbevf_resume routine.
3334 */
3335static void ixgbevf_io_resume(struct pci_dev *pdev)
3336{
3337 struct net_device *netdev = pci_get_drvdata(pdev);
3338 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3339
3340 if (netif_running(netdev))
3341 ixgbevf_up(adapter);
3342
3343 netif_device_attach(netdev);
3344}
3345
3346/* PCI Error Recovery (ERS) */
3347static struct pci_error_handlers ixgbevf_err_handler = {
3348 .error_detected = ixgbevf_io_error_detected,
3349 .slot_reset = ixgbevf_io_slot_reset,
3350 .resume = ixgbevf_io_resume,
3351};
3352
Greg Rose92915f72010-01-09 02:24:10 +00003353static struct pci_driver ixgbevf_driver = {
3354 .name = ixgbevf_driver_name,
3355 .id_table = ixgbevf_pci_tbl,
3356 .probe = ixgbevf_probe,
3357 .remove = __devexit_p(ixgbevf_remove),
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00003358#ifdef CONFIG_PM
3359 /* Power Management Hooks */
3360 .suspend = ixgbevf_suspend,
3361 .resume = ixgbevf_resume,
3362#endif
Greg Rose92915f72010-01-09 02:24:10 +00003363 .shutdown = ixgbevf_shutdown,
Alexander Duyck9f19f312012-05-11 08:33:32 +00003364 .err_handler = &ixgbevf_err_handler
Greg Rose92915f72010-01-09 02:24:10 +00003365};
3366
3367/**
Greg Rose65d676c2011-02-03 06:54:13 +00003368 * ixgbevf_init_module - Driver Registration Routine
Greg Rose92915f72010-01-09 02:24:10 +00003369 *
Greg Rose65d676c2011-02-03 06:54:13 +00003370 * ixgbevf_init_module is the first routine called when the driver is
Greg Rose92915f72010-01-09 02:24:10 +00003371 * loaded. All it does is register with the PCI subsystem.
3372 **/
3373static int __init ixgbevf_init_module(void)
3374{
3375 int ret;
Jeff Kirsherdbd96362011-10-21 19:38:18 +00003376 pr_info("%s - version %s\n", ixgbevf_driver_string,
3377 ixgbevf_driver_version);
Greg Rose92915f72010-01-09 02:24:10 +00003378
Jeff Kirsherdbd96362011-10-21 19:38:18 +00003379 pr_info("%s\n", ixgbevf_copyright);
Greg Rose92915f72010-01-09 02:24:10 +00003380
3381 ret = pci_register_driver(&ixgbevf_driver);
3382 return ret;
3383}
3384
3385module_init(ixgbevf_init_module);
3386
3387/**
Greg Rose65d676c2011-02-03 06:54:13 +00003388 * ixgbevf_exit_module - Driver Exit Cleanup Routine
Greg Rose92915f72010-01-09 02:24:10 +00003389 *
Greg Rose65d676c2011-02-03 06:54:13 +00003390 * ixgbevf_exit_module is called just before the driver is removed
Greg Rose92915f72010-01-09 02:24:10 +00003391 * from memory.
3392 **/
3393static void __exit ixgbevf_exit_module(void)
3394{
3395 pci_unregister_driver(&ixgbevf_driver);
3396}
3397
3398#ifdef DEBUG
3399/**
Greg Rose65d676c2011-02-03 06:54:13 +00003400 * ixgbevf_get_hw_dev_name - return device name string
Greg Rose92915f72010-01-09 02:24:10 +00003401 * used by hardware layer to print debugging information
3402 **/
3403char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3404{
3405 struct ixgbevf_adapter *adapter = hw->back;
3406 return adapter->netdev->name;
3407}
3408
3409#endif
3410module_exit(ixgbevf_exit_module);
3411
3412/* ixgbevf_main.c */