/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROMPE |	 /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |	 /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check to see if VFs are enabled and
	 * increase the size to support VLAN tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;
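	/* e.g. an rlpml of 1522 becomes 1526 for a VLAN-enabled VF here,
	 * since VLAN_TAG_SIZE accounts for the 4-byte 802.1Q tag */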

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */
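/* usage sketch (assuming SR-IOV capable hardware): loading the driver with
 * "modprobe igb max_vfs=2" reserves resources for two virtual functions,
 * while the default of 0 leaves SR-IOV disabled */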

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

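	/* read SYSTIML before SYSTIMH: on this hardware family the low
	 * register read is expected to latch the high half, so the two
	 * reads below form one coherent 64-bit sample (an assumption about
	 * the hardware, not something enforced by this code) */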
	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
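/* a quick sketch of the mapping this macro produces: i=0 -> 0, i=1 -> 8,
 * i=2 -> 1, i=3 -> 9, i=4 -> 2, ... even queues fill registers 0-7 and odd
 * queues fill 8-15, matching the VF queue pairing described in
 * igb_cache_ring_register() below */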
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
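		/* sketch of the IVAR0[index] byte layout implied by the
		 * masks below (bit 7 of each byte is E1000_IVAR_VALID):
		 *   byte 0 - rx queue 'index'      byte 1 - tx queue 'index'
		 *   byte 2 - rx queue 'index + 8'  byte 3 - tx queue 'index + 8'
		 */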
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
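	/* e.g. 4 rx and 4 tx queues ask for 4 + 4 + 1 = 9 vectors: one per
	 * queue plus the "other" (link/mailbox) vector that is requested
	 * first in igb_request_msix() */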
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;
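	/* a sketch of the two cases below, assuming 4 rx and 4 tx queues:
	 * with 8 or more q_vectors every ring gets its own vector
	 * (rx0..rx3 -> v0..v3, tx0..tx3 -> v4..v7); with only 4 q_vectors
	 * the rings are paired so vector i services both rx[i] and tx[i] */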

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 |
			      E1000_EICR_TX_QUEUE0 |
			      E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

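		/* vectors were requested in igb_request_msix() with the
		 * "other" handler first, so they are freed in the same
		 * order here */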
		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
				 q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}


/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
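		/* worked example (assuming a 9018-byte max frame and the
		 * 16-byte advanced Tx descriptor): min_tx_space =
		 * (9018 + 16 - 4) * 2 = 18060 -> ALIGN -> 18432 -> 18 KB,
		 * and min_rx_space = 9018 -> ALIGN -> 9216 -> 9 KB */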
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
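	/* e.g. with pba = 34 KB and a 1522-byte max frame (a sketch, values
	 * follow from the expression below): min(34816 * 9 / 10 = 31334,
	 * 34816 - 2 * 1522 = 31772) = 31334 before granularity masking */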
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
1444 err = ei->get_invariants(hw);
1445 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001446 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001447
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001448#ifdef CONFIG_PCI_IOV
1449	/* since IOV functionality isn't critical to base device function we
1450	 * can accept failure. If it fails, we don't allow IOV to be enabled */
1451 if (hw->mac.type == e1000_82576) {
1452 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1453 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1454 int i;
1455 unsigned char mac_addr[ETH_ALEN];
1456
Alexander Duyck9ca046d2009-04-09 22:49:39 +00001457 if (num_vfs) {
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001458 adapter->vf_data = kcalloc(num_vfs,
1459 sizeof(struct vf_data_storage),
1460 GFP_KERNEL);
Alexander Duyck9ca046d2009-04-09 22:49:39 +00001461 if (!adapter->vf_data) {
1462 dev_err(&pdev->dev,
1463 "Could not allocate VF private data - "
1464 "IOV enable failed\n");
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001465 } else {
Alexander Duyck9ca046d2009-04-09 22:49:39 +00001466 err = pci_enable_sriov(pdev, num_vfs);
1467 if (!err) {
1468 adapter->vfs_allocated_count = num_vfs;
1469 dev_info(&pdev->dev,
1470 "%d vfs allocated\n",
1471 num_vfs);
1472 for (i = 0;
1473 i < adapter->vfs_allocated_count;
1474 i++) {
1475 random_ether_addr(mac_addr);
1476 igb_set_vf_mac(adapter, i,
1477 mac_addr);
1478 }
1479 } else {
1480 kfree(adapter->vf_data);
1481 adapter->vf_data = NULL;
1482 }
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001483 }
1484 }
1485 }
1486
1487#endif
Alexander Duyck450c87c2009-02-06 23:22:11 +00001488 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001489 err = igb_sw_init(adapter);
1490 if (err)
1491 goto err_sw_init;
1492
1493 igb_get_bus_info_pcie(hw);
1494
1495 hw->phy.autoneg_wait_to_complete = false;
1496 hw->mac.adaptive_ifs = true;
1497
1498 /* Copper options */
1499 if (hw->phy.media_type == e1000_media_type_copper) {
1500 hw->phy.mdix = AUTO_ALL_MODES;
1501 hw->phy.disable_polarity_correction = false;
1502 hw->phy.ms_type = e1000_ms_hw_default;
1503 }
1504
1505 if (igb_check_reset_block(hw))
1506 dev_info(&pdev->dev,
1507 "PHY reset is blocked due to SOL/IDER session.\n");
1508
1509 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001510 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001511 NETIF_F_HW_VLAN_TX |
1512 NETIF_F_HW_VLAN_RX |
1513 NETIF_F_HW_VLAN_FILTER;
1514
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001515 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001516 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001517 netdev->features |= NETIF_F_TSO6;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001518
Herbert Xu5c0999b2009-01-19 15:20:57 -08001519 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001520
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001521 netdev->vlan_features |= NETIF_F_TSO;
1522 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001523 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001524 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001525 netdev->vlan_features |= NETIF_F_SG;
1526
Auke Kok9d5c8242008-01-24 02:22:38 -08001527 if (pci_using_dac)
1528 netdev->features |= NETIF_F_HIGHDMA;
1529
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001530 if (adapter->hw.mac.type == e1000_82576)
1531 netdev->features |= NETIF_F_SCTP_CSUM;
1532
Auke Kok9d5c8242008-01-24 02:22:38 -08001533 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1534
1535 /* before reading the NVM, reset the controller to put the device in a
1536 * known good starting state */
1537 hw->mac.ops.reset_hw(hw);
1538
1539 /* make sure the NVM is good */
1540 if (igb_validate_nvm_checksum(hw) < 0) {
1541 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1542 err = -EIO;
1543 goto err_eeprom;
1544 }
1545
1546 /* copy the MAC address out of the NVM */
1547 if (hw->mac.ops.read_mac_addr(hw))
1548 dev_err(&pdev->dev, "NVM Read Error\n");
1549
1550 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1551 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1552
1553 if (!is_valid_ether_addr(netdev->perm_addr)) {
1554 dev_err(&pdev->dev, "Invalid MAC Address\n");
1555 err = -EIO;
1556 goto err_eeprom;
1557 }
1558
Alexander Duyck0e340482009-03-20 00:17:08 +00001559 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1560 (unsigned long) adapter);
1561 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1562 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001563
1564 INIT_WORK(&adapter->reset_task, igb_reset_task);
1565 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1566
Alexander Duyck450c87c2009-02-06 23:22:11 +00001567 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001568 adapter->fc_autoneg = true;
1569 hw->mac.autoneg = true;
1570 hw->phy.autoneg_advertised = 0x2f;
1571
Alexander Duyck0cce1192009-07-23 18:10:24 +00001572 hw->fc.requested_mode = e1000_fc_default;
1573 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001574
Auke Kok9d5c8242008-01-24 02:22:38 -08001575 igb_validate_mdi_setting(hw);
1576
Auke Kok9d5c8242008-01-24 02:22:38 -08001577	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
1578 * enable the ACPI Magic Packet filter
1579 */
1580
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001581 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001582 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001583 else if (hw->bus.func == 1)
1584 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001585
1586 if (eeprom_data & eeprom_apme_mask)
1587 adapter->eeprom_wol |= E1000_WUFC_MAG;
1588
1589 /* now that we have the eeprom settings, apply the special cases where
1590 * the eeprom may be wrong or the board simply won't support wake on
1591 * lan on a particular port */
1592 switch (pdev->device) {
1593 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1594 adapter->eeprom_wol = 0;
1595 break;
1596 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001597 case E1000_DEV_ID_82576_FIBER:
1598 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001599 /* Wake events only supported on port A for dual fiber
1600 * regardless of eeprom setting */
1601 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1602 adapter->eeprom_wol = 0;
1603 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001604 case E1000_DEV_ID_82576_QUAD_COPPER:
1605 /* if quad port adapter, disable WoL on all but port A */
1606 if (global_quad_port_a != 0)
1607 adapter->eeprom_wol = 0;
1608 else
1609 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1610 /* Reset for multiple quad port adapters */
1611 if (++global_quad_port_a == 4)
1612 global_quad_port_a = 0;
1613 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001614 }
1615
1616 /* initialize the wol settings based on the eeprom settings */
1617 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001618 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001619
1620 /* reset the hardware with the new settings */
1621 igb_reset(adapter);
1622
1623 /* let the f/w know that the h/w is now under the control of the
1624 * driver. */
1625 igb_get_hw_control(adapter);
1626
Auke Kok9d5c8242008-01-24 02:22:38 -08001627 strcpy(netdev->name, "eth%d");
1628 err = register_netdev(netdev);
1629 if (err)
1630 goto err_register;
1631
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001632 /* carrier off reporting is important to ethtool even BEFORE open */
1633 netif_carrier_off(netdev);
1634
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001635#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001636 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001637 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001638 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001639 igb_setup_dca(adapter);
1640 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001641
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001642#endif
1643
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001644 switch (hw->mac.type) {
1645 case e1000_82576:
1646 /*
1647 * Initialize hardware timer: we keep it running just in case
1648		 * some program needs it later on.
1649 */
1650 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1651 adapter->cycles.read = igb_read_clock;
1652 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1653 adapter->cycles.mult = 1;
1654		/*
1655 * Scale the NIC clock cycle by a large factor so that
1656 * relatively small clock corrections can be added or
1657		 * subtracted at each clock tick. The drawbacks of a large
1658 * factor are a) that the clock register overflows more quickly
1659 * (not such a big deal) and b) that the increment per tick has
1660 * to fit into 24 bits. As a result we need to use a shift of
1661 * 19 so we can fit a value of 16 into the TIMINCA register.
1662 */
1663 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1664 wr32(E1000_TIMINCA,
1665 (1 << E1000_TIMINCA_16NS_SHIFT) |
1666 (16 << IGB_82576_TSYNC_SHIFT));
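		/*
		 * Editor's note on the arithmetic above: with a shift of 19,
		 * the increment written is 16 << 19 = 8,388,608 (2^23),
		 * which fits in the 24-bit increment field
		 * (maximum 2^24 - 1 = 16,777,215).
		 */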
Patrick Ohly38c845c2009-02-12 05:03:41 +00001667
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001668 /* Set registers so that rollover occurs soon to test this. */
1669 wr32(E1000_SYSTIML, 0x00000000);
1670 wr32(E1000_SYSTIMH, 0xFF800000);
1671 wrfl();
Patrick Ohly33af6bc2009-02-12 05:03:43 +00001672
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001673 timecounter_init(&adapter->clock,
1674 &adapter->cycles,
1675 ktime_to_ns(ktime_get_real()));
1676 /*
1677 * Synchronize our NIC clock against system wall clock. NIC
1678 * time stamp reading requires ~3us per sample, each sample
1679 * was pretty stable even under load => only require 10
1680 * samples for each offset comparison.
1681 */
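		/* Editor's note: at ~3us per sample, each 10-sample offset
		 * comparison below costs roughly 30us. */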
1682 memset(&adapter->compare, 0, sizeof(adapter->compare));
1683 adapter->compare.source = &adapter->clock;
1684 adapter->compare.target = ktime_get_real;
1685 adapter->compare.num_samples = 10;
1686 timecompare_update(&adapter->compare, 0);
1687 break;
1688 case e1000_82575:
1689 /* 82575 does not support timesync */
1690 default:
1691 break;
Patrick Ohly38c845c2009-02-12 05:03:41 +00001692 }
Patrick Ohly38c845c2009-02-12 05:03:41 +00001693
Auke Kok9d5c8242008-01-24 02:22:38 -08001694 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1695 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001696 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001697 netdev->name,
1698 ((hw->bus.speed == e1000_bus_speed_2500)
1699 ? "2.5Gb/s" : "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001700 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1701 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1702 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1703 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001704 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001705
1706 igb_read_part_num(hw, &part_num);
1707 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1708 (part_num >> 8), (part_num & 0xff));
1709
1710 dev_info(&pdev->dev,
1711 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1712 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001713 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001714 adapter->num_rx_queues, adapter->num_tx_queues);
1715
Auke Kok9d5c8242008-01-24 02:22:38 -08001716 return 0;
1717
1718err_register:
1719 igb_release_hw_control(adapter);
1720err_eeprom:
1721 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001722 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001723
1724 if (hw->flash_address)
1725 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001726err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001727 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001728 iounmap(hw->hw_addr);
1729err_ioremap:
1730 free_netdev(netdev);
1731err_alloc_etherdev:
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001732 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1733 IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001734err_pci_reg:
1735err_dma:
1736 pci_disable_device(pdev);
1737 return err;
1738}
1739
1740/**
1741 * igb_remove - Device Removal Routine
1742 * @pdev: PCI device information struct
1743 *
1744 * igb_remove is called by the PCI subsystem to alert the driver
1745	 * that it should release a PCI device. This could be caused by a
1746 * Hot-Plug event, or because the driver is going to be removed from
1747 * memory.
1748 **/
1749static void __devexit igb_remove(struct pci_dev *pdev)
1750{
1751 struct net_device *netdev = pci_get_drvdata(pdev);
1752 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001753 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001754
1755	/* flush_scheduled_work() may reschedule our watchdog task, so
1756 * explicitly disable watchdog tasks from being rescheduled */
1757 set_bit(__IGB_DOWN, &adapter->state);
1758 del_timer_sync(&adapter->watchdog_timer);
1759 del_timer_sync(&adapter->phy_info_timer);
1760
1761 flush_scheduled_work();
1762
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001763#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001764 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001765 dev_info(&pdev->dev, "DCA disabled\n");
1766 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001767 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001768 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001769 }
1770#endif
1771
Auke Kok9d5c8242008-01-24 02:22:38 -08001772 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1773 * would have already happened in close and is redundant. */
1774 igb_release_hw_control(adapter);
1775
1776 unregister_netdev(netdev);
1777
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001778 if (!igb_check_reset_block(&adapter->hw))
1779 igb_reset_phy(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001780
Alexander Duyck047e0032009-10-27 15:49:27 +00001781 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001782
Alexander Duyck37680112009-02-19 20:40:30 -08001783#ifdef CONFIG_PCI_IOV
1784 /* reclaim resources allocated to VFs */
1785 if (adapter->vf_data) {
1786 /* disable iov and allow time for transactions to clear */
1787 pci_disable_sriov(pdev);
1788 msleep(500);
1789
1790 kfree(adapter->vf_data);
1791 adapter->vf_data = NULL;
1792 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1793 msleep(100);
1794 dev_info(&pdev->dev, "IOV Disabled\n");
1795 }
1796#endif
Alexander Duyck28b07592009-02-06 23:20:31 +00001797 iounmap(hw->hw_addr);
1798 if (hw->flash_address)
1799 iounmap(hw->flash_address);
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001800 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1801 IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001802
1803 free_netdev(netdev);
1804
Frans Pop19d5afd2009-10-02 10:04:12 -07001805 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001806
Auke Kok9d5c8242008-01-24 02:22:38 -08001807 pci_disable_device(pdev);
1808}
1809
1810/**
1811 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1812 * @adapter: board private structure to initialize
1813 *
1814 * igb_sw_init initializes the Adapter private data structure.
1815 * Fields are initialized based on PCI device information and
1816 * OS network device settings (MTU size).
1817 **/
1818static int __devinit igb_sw_init(struct igb_adapter *adapter)
1819{
1820 struct e1000_hw *hw = &adapter->hw;
1821 struct net_device *netdev = adapter->netdev;
1822 struct pci_dev *pdev = adapter->pdev;
1823
1824 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1825
Alexander Duyck68fd9912008-11-20 00:48:10 -08001826 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1827 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001828 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1829 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1830
Auke Kok9d5c8242008-01-24 02:22:38 -08001831 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1832 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1833
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07001834 /* This call may decrease the number of queues depending on
1835 * interrupt mode. */
Alexander Duyck047e0032009-10-27 15:49:27 +00001836 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001837 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1838 return -ENOMEM;
1839 }
1840
1841 /* Explicitly disable IRQ since the NIC can be in any state. */
1842 igb_irq_disable(adapter);
1843
1844 set_bit(__IGB_DOWN, &adapter->state);
1845 return 0;
1846}
1847
1848/**
1849 * igb_open - Called when a network interface is made active
1850 * @netdev: network interface device structure
1851 *
1852 * Returns 0 on success, negative value on failure
1853 *
1854 * The open entry point is called when a network interface is made
1855 * active by the system (IFF_UP). At this point all resources needed
1856 * for transmit and receive operations are allocated, the interrupt
1857 * handler is registered with the OS, the watchdog timer is started,
1858 * and the stack is notified that the interface is ready.
1859 **/
1860static int igb_open(struct net_device *netdev)
1861{
1862 struct igb_adapter *adapter = netdev_priv(netdev);
1863 struct e1000_hw *hw = &adapter->hw;
1864 int err;
1865 int i;
1866
1867 /* disallow open during test */
1868 if (test_bit(__IGB_TESTING, &adapter->state))
1869 return -EBUSY;
1870
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001871 netif_carrier_off(netdev);
1872
Auke Kok9d5c8242008-01-24 02:22:38 -08001873 /* allocate transmit descriptors */
1874 err = igb_setup_all_tx_resources(adapter);
1875 if (err)
1876 goto err_setup_tx;
1877
1878 /* allocate receive descriptors */
1879 err = igb_setup_all_rx_resources(adapter);
1880 if (err)
1881 goto err_setup_rx;
1882
1883 /* e1000_power_up_phy(adapter); */
1884
1885 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1886 if ((adapter->hw.mng_cookie.status &
1887 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1888 igb_update_mng_vlan(adapter);
1889
1890 /* before we allocate an interrupt, we must be ready to handle it.
1891 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1892	 * as soon as we call pci_request_irq, so we have to set up our
1893 * clean_rx handler before we do so. */
1894 igb_configure(adapter);
1895
Alexander Duycke1739522009-02-19 20:39:44 -08001896 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1897
Auke Kok9d5c8242008-01-24 02:22:38 -08001898 err = igb_request_irq(adapter);
1899 if (err)
1900 goto err_req_irq;
1901
1902 /* From here on the code is the same as igb_up() */
1903 clear_bit(__IGB_DOWN, &adapter->state);
1904
Alexander Duyck047e0032009-10-27 15:49:27 +00001905 for (i = 0; i < adapter->num_q_vectors; i++) {
1906 struct igb_q_vector *q_vector = adapter->q_vector[i];
1907 napi_enable(&q_vector->napi);
1908 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001909
1910 /* Clear any pending interrupts. */
1911 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001912
1913 igb_irq_enable(adapter);
1914
Alexander Duyckd4960302009-10-27 15:53:45 +00001915 /* notify VFs that reset has been completed */
1916 if (adapter->vfs_allocated_count) {
1917 u32 reg_data = rd32(E1000_CTRL_EXT);
1918 reg_data |= E1000_CTRL_EXT_PFRSTD;
1919 wr32(E1000_CTRL_EXT, reg_data);
1920 }
1921
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07001922 netif_tx_start_all_queues(netdev);
1923
Auke Kok9d5c8242008-01-24 02:22:38 -08001924 /* Fire a link status change interrupt to start the watchdog. */
1925 wr32(E1000_ICS, E1000_ICS_LSC);
1926
1927 return 0;
1928
1929err_req_irq:
1930 igb_release_hw_control(adapter);
1931 /* e1000_power_down_phy(adapter); */
1932 igb_free_all_rx_resources(adapter);
1933err_setup_rx:
1934 igb_free_all_tx_resources(adapter);
1935err_setup_tx:
1936 igb_reset(adapter);
1937
1938 return err;
1939}
1940
1941/**
1942 * igb_close - Disables a network interface
1943 * @netdev: network interface device structure
1944 *
1945 * Returns 0, this is not allowed to fail
1946 *
1947 * The close entry point is called when an interface is de-activated
1948 * by the OS. The hardware is still under the driver's control, but
1949 * needs to be disabled. A global MAC reset is issued to stop the
1950 * hardware, and all transmit and receive resources are freed.
1951 **/
1952static int igb_close(struct net_device *netdev)
1953{
1954 struct igb_adapter *adapter = netdev_priv(netdev);
1955
1956 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1957 igb_down(adapter);
1958
1959 igb_free_irq(adapter);
1960
1961 igb_free_all_tx_resources(adapter);
1962 igb_free_all_rx_resources(adapter);
1963
1964 /* kill manageability vlan ID if supported, but not if a vlan with
1965 * the same ID is registered on the host OS (let 8021q kill it) */
1966 if ((adapter->hw.mng_cookie.status &
1967 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1968 !(adapter->vlgrp &&
1969 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1970 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1971
1972 return 0;
1973}
1974
1975/**
1976 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08001977 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1978 *
1979 * Return 0 on success, negative on failure
1980 **/
Alexander Duyck80785292009-10-27 15:51:47 +00001981int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08001982{
Alexander Duyck80785292009-10-27 15:51:47 +00001983 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001984 int size;
1985
1986 size = sizeof(struct igb_buffer) * tx_ring->count;
1987 tx_ring->buffer_info = vmalloc(size);
1988 if (!tx_ring->buffer_info)
1989 goto err;
1990 memset(tx_ring->buffer_info, 0, size);
1991
1992 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08001993 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08001994 tx_ring->size = ALIGN(tx_ring->size, 4096);
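	/*
	 * Editor's note: advanced Tx descriptors are 16 bytes each, so a
	 * 256-descriptor ring (the driver default, editor's assumption) is
	 * exactly 4096 bytes and ALIGN() leaves it unchanged; other counts
	 * round up to the next 4 KB boundary.
	 */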
1995
1996 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1997 &tx_ring->dma);
1998
1999 if (!tx_ring->desc)
2000 goto err;
2001
Auke Kok9d5c8242008-01-24 02:22:38 -08002002 tx_ring->next_to_use = 0;
2003 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002004 return 0;
2005
2006err:
2007 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00002008 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002009 "Unable to allocate memory for the transmit descriptor ring\n");
2010 return -ENOMEM;
2011}
2012
2013/**
2014 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2015 * (Descriptors) for all queues
2016 * @adapter: board private structure
2017 *
2018 * Return 0 on success, negative on failure
2019 **/
2020static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2021{
2022 int i, err = 0;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07002023 int r_idx;
Auke Kok9d5c8242008-01-24 02:22:38 -08002024
2025 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck80785292009-10-27 15:51:47 +00002026 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002027 if (err) {
2028 dev_err(&adapter->pdev->dev,
2029 "Allocation for Tx Queue %u failed\n", i);
2030 for (i--; i >= 0; i--)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002031 igb_free_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002032 break;
2033 }
2034 }
2035
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07002036 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
2037 r_idx = i % adapter->num_tx_queues;
2038 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002039 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002040 return err;
2041}
2042
2043/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002044 * igb_setup_tctl - configure the transmit control registers
2045 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002046 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002047void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002048{
Auke Kok9d5c8242008-01-24 02:22:38 -08002049 struct e1000_hw *hw = &adapter->hw;
2050 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002051
Alexander Duyck85b430b2009-10-27 15:50:29 +00002052 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2053 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002054
2055 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002056 tctl = rd32(E1000_TCTL);
2057 tctl &= ~E1000_TCTL_CT;
2058 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2059 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2060
2061 igb_config_collision_dist(hw);
2062
Auke Kok9d5c8242008-01-24 02:22:38 -08002063 /* Enable transmits */
2064 tctl |= E1000_TCTL_EN;
2065
2066 wr32(E1000_TCTL, tctl);
2067}
2068
2069/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002070 * igb_configure_tx_ring - Configure transmit ring after Reset
2071 * @adapter: board private structure
2072 * @ring: tx ring to configure
2073 *
2074 * Configure a transmit ring after a reset.
2075 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002076void igb_configure_tx_ring(struct igb_adapter *adapter,
2077 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002078{
2079 struct e1000_hw *hw = &adapter->hw;
2080 u32 txdctl;
2081 u64 tdba = ring->dma;
2082 int reg_idx = ring->reg_idx;
2083
2084 /* disable the queue */
2085 txdctl = rd32(E1000_TXDCTL(reg_idx));
2086 wr32(E1000_TXDCTL(reg_idx),
2087 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2088 wrfl();
2089 mdelay(10);
2090
2091 wr32(E1000_TDLEN(reg_idx),
2092 ring->count * sizeof(union e1000_adv_tx_desc));
2093 wr32(E1000_TDBAL(reg_idx),
2094 tdba & 0x00000000ffffffffULL);
2095 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2096
Alexander Duyckfce99e32009-10-27 15:51:27 +00002097 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2098 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2099 writel(0, ring->head);
2100 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002101
2102 txdctl |= IGB_TX_PTHRESH;
2103 txdctl |= IGB_TX_HTHRESH << 8;
2104 txdctl |= IGB_TX_WTHRESH << 16;
2105
2106 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2107 wr32(E1000_TXDCTL(reg_idx), txdctl);
2108}
2109
2110/**
2111 * igb_configure_tx - Configure transmit Unit after Reset
2112 * @adapter: board private structure
2113 *
2114 * Configure the Tx unit of the MAC after a reset.
2115 **/
2116static void igb_configure_tx(struct igb_adapter *adapter)
2117{
2118 int i;
2119
2120 for (i = 0; i < adapter->num_tx_queues; i++)
2121 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002122}
2123
2124/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002125 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002126 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2127 *
2128 * Returns 0 on success, negative on failure
2129 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002130int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002131{
Alexander Duyck80785292009-10-27 15:51:47 +00002132 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002133 int size, desc_len;
2134
2135 size = sizeof(struct igb_buffer) * rx_ring->count;
2136 rx_ring->buffer_info = vmalloc(size);
2137 if (!rx_ring->buffer_info)
2138 goto err;
2139 memset(rx_ring->buffer_info, 0, size);
2140
2141 desc_len = sizeof(union e1000_adv_rx_desc);
2142
2143 /* Round up to nearest 4K */
2144 rx_ring->size = rx_ring->count * desc_len;
2145 rx_ring->size = ALIGN(rx_ring->size, 4096);
2146
2147 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2148 &rx_ring->dma);
2149
2150 if (!rx_ring->desc)
2151 goto err;
2152
2153 rx_ring->next_to_clean = 0;
2154 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002155
Auke Kok9d5c8242008-01-24 02:22:38 -08002156 return 0;
2157
2158err:
2159 vfree(rx_ring->buffer_info);
Alexander Duyck80785292009-10-27 15:51:47 +00002160 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002161 "the receive descriptor ring\n");
2162 return -ENOMEM;
2163}
2164
2165/**
2166 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2167 * (Descriptors) for all queues
2168 * @adapter: board private structure
2169 *
2170 * Return 0 on success, negative on failure
2171 **/
2172static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2173{
2174 int i, err = 0;
2175
2176 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck80785292009-10-27 15:51:47 +00002177 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002178 if (err) {
2179 dev_err(&adapter->pdev->dev,
2180 "Allocation for Rx Queue %u failed\n", i);
2181 for (i--; i >= 0; i--)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002182 igb_free_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002183 break;
2184 }
2185 }
2186
2187 return err;
2188}
2189
2190/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002191 * igb_setup_mrqc - configure the multiple receive queue control registers
2192 * @adapter: Board private structure
2193 **/
2194static void igb_setup_mrqc(struct igb_adapter *adapter)
2195{
2196 struct e1000_hw *hw = &adapter->hw;
2197 u32 mrqc, rxcsum;
2198 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2199 union e1000_reta {
2200 u32 dword;
2201 u8 bytes[4];
2202 } reta;
2203 static const u8 rsshash[40] = {
2204 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2205 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2206 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2207 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2208
2209 /* Fill out hash function seeds */
2210 for (j = 0; j < 10; j++) {
2211 u32 rsskey = rsshash[(j * 4)];
2212 rsskey |= rsshash[(j * 4) + 1] << 8;
2213 rsskey |= rsshash[(j * 4) + 2] << 16;
2214 rsskey |= rsshash[(j * 4) + 3] << 24;
2215 array_wr32(E1000_RSSRK(0), j, rsskey);
2216 }
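	/*
	 * Editor's illustration: for j = 0 the seed bytes 0x6d, 0x5a, 0x56,
	 * 0xda are packed low byte first, so RSSRK(0) is written with
	 * 0xda565a6d.
	 */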
2217
2218 num_rx_queues = adapter->num_rx_queues;
2219
2220 if (adapter->vfs_allocated_count) {
2221 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2222 switch (hw->mac.type) {
2223 case e1000_82576:
2224 shift = 3;
2225 num_rx_queues = 2;
2226 break;
2227 case e1000_82575:
2228 shift = 2;
2229 shift2 = 6;
2230 default:
2231 break;
2232 }
2233 } else {
2234 if (hw->mac.type == e1000_82575)
2235 shift = 6;
2236 }
2237
2238 for (j = 0; j < (32 * 4); j++) {
2239 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2240 if (shift2)
2241 reta.bytes[j & 3] |= num_rx_queues << shift2;
2242 if ((j & 3) == 3)
2243 wr32(E1000_RETA(j >> 2), reta.dword);
2244 }
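	/*
	 * Editor's illustration: with num_rx_queues = 4 and shift = 0 the
	 * loop above packs the bytes 0, 1, 2, 3 into each dword, so every
	 * RETA register is written as 0x03020100 on a little-endian host.
	 */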
2245
2246 /*
2247 * Disable raw packet checksumming so that RSS hash is placed in
2248 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2249 * offloads as they are enabled by default
2250 */
2251 rxcsum = rd32(E1000_RXCSUM);
2252 rxcsum |= E1000_RXCSUM_PCSD;
2253
2254 if (adapter->hw.mac.type >= e1000_82576)
2255 /* Enable Receive Checksum Offload for SCTP */
2256 rxcsum |= E1000_RXCSUM_CRCOFL;
2257
2258 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2259 wr32(E1000_RXCSUM, rxcsum);
2260
2261 /* If VMDq is enabled then we set the appropriate mode for that, else
2262 * we default to RSS so that an RSS hash is calculated per packet even
2263 * if we are only using one queue */
2264 if (adapter->vfs_allocated_count) {
2265 if (hw->mac.type > e1000_82575) {
2266 /* Set the default pool for the PF's first queue */
2267 u32 vtctl = rd32(E1000_VT_CTL);
2268 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2269 E1000_VT_CTL_DISABLE_DEF_POOL);
2270 vtctl |= adapter->vfs_allocated_count <<
2271 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2272 wr32(E1000_VT_CTL, vtctl);
2273 }
2274 if (adapter->num_rx_queues > 1)
2275 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2276 else
2277 mrqc = E1000_MRQC_ENABLE_VMDQ;
2278 } else {
2279 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2280 }
2281 igb_vmm_control(adapter);
2282
2283 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2284 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2285 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2286 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2287 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2288 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2289 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2290 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2291
2292 wr32(E1000_MRQC, mrqc);
2293}
2294
2295/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002296 * igb_setup_rctl - configure the receive control registers
2297 * @adapter: Board private structure
2298 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002299void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002300{
2301 struct e1000_hw *hw = &adapter->hw;
2302 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002303
2304 rctl = rd32(E1000_RCTL);
2305
2306 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002307 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002308
Alexander Duyck69d728b2008-11-25 01:04:03 -08002309 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002310 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002311
Auke Kok87cb7e82008-07-08 15:08:29 -07002312 /*
2313 * enable stripping of CRC. It's unlikely this will break BMC
2314 * redirection as it did with e1000. Newer features require
2315 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002316 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002317 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002318
Alexander Duyck9b07f3d32008-11-25 01:03:26 -08002319 /*
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002320 * disable store bad packets and clear size bits.
Alexander Duyck9b07f3d32008-11-25 01:03:26 -08002321 */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002322 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002323
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002324 /* enable LPE to prevent packets larger than max_frame_size */
2325 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002326
Alexander Duyck952f72a2009-10-27 15:51:07 +00002327 /* disable queue 0 to prevent tail write w/o re-config */
2328 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002329
Alexander Duycke1739522009-02-19 20:39:44 -08002330 /* Attention!!! For SR-IOV PF driver operations you must enable
2331	 * queue drop for all VF and PF queues to prevent head-of-line blocking
2332	 * if an untrusted VF does not provide descriptors to hardware.
2333 */
2334 if (adapter->vfs_allocated_count) {
2335 u32 vmolr;
2336
Alexander Duycke1739522009-02-19 20:39:44 -08002337 /* set all queue drop enable bits */
2338 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002339
Alexander Duyck77a22942009-05-06 16:43:48 -07002340 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
Alexander Duycke1739522009-02-19 20:39:44 -08002341 if (rctl & E1000_RCTL_LPE)
2342 vmolr |= E1000_VMOLR_LPE;
Alexander Duyck77a22942009-05-06 16:43:48 -07002343 if (adapter->num_rx_queues > 1)
Alexander Duycke1739522009-02-19 20:39:44 -08002344 vmolr |= E1000_VMOLR_RSSE;
Alexander Duyck77a22942009-05-06 16:43:48 -07002345 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
Alexander Duycke1739522009-02-19 20:39:44 -08002346 }
2347
Auke Kok9d5c8242008-01-24 02:22:38 -08002348 wr32(E1000_RCTL, rctl);
2349}
2350
2351/**
Alexander Duycke1739522009-02-19 20:39:44 -08002352 * igb_rlpml_set - set maximum receive packet size
2353 * @adapter: board private structure
2354 *
2355 * Configure maximum receivable packet size.
2356 **/
2357static void igb_rlpml_set(struct igb_adapter *adapter)
2358{
2359 u32 max_frame_size = adapter->max_frame_size;
2360 struct e1000_hw *hw = &adapter->hw;
2361 u16 pf_id = adapter->vfs_allocated_count;
2362
2363 if (adapter->vlgrp)
2364 max_frame_size += VLAN_TAG_SIZE;
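	/*
	 * Editor's note: with the default 1500-byte MTU, max_frame_size is
	 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, and the line above
	 * adds VLAN_TAG_SIZE (4) for 1522 when a VLAN group is registered.
	 */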
2365
2366	/* if VFs are enabled we set RLPML to the largest possible request
2367 * size and set the VMOLR RLPML to the size we need */
2368 if (pf_id) {
2369 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2370 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
2371 }
2372
2373 wr32(E1000_RLPML, max_frame_size);
2374}
2375
2376/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002377 * igb_configure_rx_ring - Configure a receive ring after Reset
2378 * @adapter: board private structure
2379 * @ring: receive ring to be configured
2380 *
2381 * Configure the Rx unit of the MAC after a reset.
2382 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002383void igb_configure_rx_ring(struct igb_adapter *adapter,
2384 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002385{
2386 struct e1000_hw *hw = &adapter->hw;
2387 u64 rdba = ring->dma;
2388 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002389 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002390
2391 /* disable the queue */
2392 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2393 wr32(E1000_RXDCTL(reg_idx),
2394 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2395
2396 /* Set DMA base address registers */
2397 wr32(E1000_RDBAL(reg_idx),
2398 rdba & 0x00000000ffffffffULL);
2399 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2400 wr32(E1000_RDLEN(reg_idx),
2401 ring->count * sizeof(union e1000_adv_rx_desc));
2402
2403 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002404 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2405 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2406 writel(0, ring->head);
2407 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002408
Alexander Duyck952f72a2009-10-27 15:51:07 +00002409 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002410 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2411 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002412 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2413#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2414 srrctl |= IGB_RXBUFFER_16384 >>
2415 E1000_SRRCTL_BSIZEPKT_SHIFT;
2416#else
2417 srrctl |= (PAGE_SIZE / 2) >>
2418 E1000_SRRCTL_BSIZEPKT_SHIFT;
2419#endif
2420 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2421 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002422 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002423 E1000_SRRCTL_BSIZEPKT_SHIFT;
2424 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2425 }
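	/*
	 * Editor's illustration, assuming the usual 1KB packet-buffer
	 * granularity (E1000_SRRCTL_BSIZEPKT_SHIFT == 10): a 2048-byte
	 * rx_buffer_len programs a BSIZEPKT field of 2048 >> 10 = 2.
	 */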
2426
2427 wr32(E1000_SRRCTL(reg_idx), srrctl);
2428
Alexander Duyck85b430b2009-10-27 15:50:29 +00002429 /* enable receive descriptor fetching */
2430 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2431 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2432 rxdctl &= 0xFFF00000;
2433 rxdctl |= IGB_RX_PTHRESH;
2434 rxdctl |= IGB_RX_HTHRESH << 8;
2435 rxdctl |= IGB_RX_WTHRESH << 16;
2436 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2437}
2438
2439/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002440 * igb_configure_rx - Configure receive Unit after Reset
2441 * @adapter: board private structure
2442 *
2443 * Configure the Rx unit of the MAC after a reset.
2444 **/
2445static void igb_configure_rx(struct igb_adapter *adapter)
2446{
Hannes Eder91075842009-02-18 19:36:04 -08002447 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002448
Alexander Duyck68d480c2009-10-05 06:33:08 +00002449 /* set UTA to appropriate mode */
2450 igb_set_uta(adapter);
2451
Alexander Duyck26ad9172009-10-05 06:32:49 +00002452 /* set the correct pool for the PF default MAC address in entry 0 */
2453 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2454 adapter->vfs_allocated_count);
2455
Alexander Duyck06cf2662009-10-27 15:53:25 +00002456 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2457 * the Base and Length of the Rx Descriptor Ring */
2458 for (i = 0; i < adapter->num_rx_queues; i++)
2459 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002460}
2461
2462/**
2463 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002464 * @tx_ring: Tx descriptor ring for a specific queue
2465 *
2466 * Free all transmit software resources
2467 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002468void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002469{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002470 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002471
2472 vfree(tx_ring->buffer_info);
2473 tx_ring->buffer_info = NULL;
2474
Alexander Duyck80785292009-10-27 15:51:47 +00002475 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2476 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002477
2478 tx_ring->desc = NULL;
2479}
2480
2481/**
2482 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2483 * @adapter: board private structure
2484 *
2485 * Free all transmit software resources
2486 **/
2487static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2488{
2489 int i;
2490
2491 for (i = 0; i < adapter->num_tx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002492 igb_free_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002493}
2494
Alexander Duyckb1a436c2009-10-27 15:54:43 +00002495void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2496 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002497{
Alexander Duyck65689fe2009-03-20 00:17:43 +00002498 buffer_info->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002499 if (buffer_info->skb) {
Alexander Duyck80785292009-10-27 15:51:47 +00002500 skb_dma_unmap(&tx_ring->pdev->dev,
2501 buffer_info->skb,
Alexander Duyck65689fe2009-03-20 00:17:43 +00002502 DMA_TO_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002503 dev_kfree_skb_any(buffer_info->skb);
2504 buffer_info->skb = NULL;
2505 }
2506 buffer_info->time_stamp = 0;
2507 /* buffer_info must be completely set up in the transmit path */
2508}
2509
2510/**
2511 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002512 * @tx_ring: ring to be cleaned
2513 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002514static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002515{
2516 struct igb_buffer *buffer_info;
2517 unsigned long size;
2518 unsigned int i;
2519
2520 if (!tx_ring->buffer_info)
2521 return;
2522 /* Free all the Tx ring sk_buffs */
2523
2524 for (i = 0; i < tx_ring->count; i++) {
2525 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002526 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002527 }
2528
2529 size = sizeof(struct igb_buffer) * tx_ring->count;
2530 memset(tx_ring->buffer_info, 0, size);
2531
2532 /* Zero out the descriptor ring */
2533
2534 memset(tx_ring->desc, 0, tx_ring->size);
2535
2536 tx_ring->next_to_use = 0;
2537 tx_ring->next_to_clean = 0;
2538
Alexander Duyckfce99e32009-10-27 15:51:27 +00002539 writel(0, tx_ring->head);
2540 writel(0, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08002541}
2542
2543/**
2544 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2545 * @adapter: board private structure
2546 **/
2547static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2548{
2549 int i;
2550
2551 for (i = 0; i < adapter->num_tx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002552 igb_clean_tx_ring(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002553}
2554
2555/**
2556 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002557 * @rx_ring: ring to clean the resources from
2558 *
2559 * Free all receive software resources
2560 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002561void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002562{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002563 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002564
2565 vfree(rx_ring->buffer_info);
2566 rx_ring->buffer_info = NULL;
2567
Alexander Duyck80785292009-10-27 15:51:47 +00002568 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2569 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002570
2571 rx_ring->desc = NULL;
2572}
2573
2574/**
2575 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2576 * @adapter: board private structure
2577 *
2578 * Free all receive software resources
2579 **/
2580static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2581{
2582 int i;
2583
2584 for (i = 0; i < adapter->num_rx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002585 igb_free_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002586}
2587
2588/**
2589 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002590 * @rx_ring: ring to free buffers from
2591 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002592static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002593{
2594 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002595 unsigned long size;
2596 unsigned int i;
2597
2598 if (!rx_ring->buffer_info)
2599 return;
2600 /* Free all the Rx ring sk_buffs */
2601 for (i = 0; i < rx_ring->count; i++) {
2602 buffer_info = &rx_ring->buffer_info[i];
2603 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002604 pci_unmap_single(rx_ring->pdev,
2605 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002606 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002607 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002608 buffer_info->dma = 0;
2609 }
2610
2611 if (buffer_info->skb) {
2612 dev_kfree_skb(buffer_info->skb);
2613 buffer_info->skb = NULL;
2614 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002615 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002616 pci_unmap_page(rx_ring->pdev,
2617 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002618 PAGE_SIZE / 2,
2619 PCI_DMA_FROMDEVICE);
2620 buffer_info->page_dma = 0;
2621 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002622 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002623 put_page(buffer_info->page);
2624 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002625 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002626 }
2627 }
2628
Auke Kok9d5c8242008-01-24 02:22:38 -08002629 size = sizeof(struct igb_buffer) * rx_ring->count;
2630 memset(rx_ring->buffer_info, 0, size);
2631
2632 /* Zero out the descriptor ring */
2633 memset(rx_ring->desc, 0, rx_ring->size);
2634
2635 rx_ring->next_to_clean = 0;
2636 rx_ring->next_to_use = 0;
2637
Alexander Duyckfce99e32009-10-27 15:51:27 +00002638 writel(0, rx_ring->head);
2639 writel(0, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08002640}
2641
2642/**
2643 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2644 * @adapter: board private structure
2645 **/
2646static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2647{
2648 int i;
2649
2650 for (i = 0; i < adapter->num_rx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002651 igb_clean_rx_ring(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002652}
2653
2654/**
2655 * igb_set_mac - Change the Ethernet Address of the NIC
2656 * @netdev: network interface device structure
2657 * @p: pointer to an address structure
2658 *
2659 * Returns 0 on success, negative on failure
2660 **/
2661static int igb_set_mac(struct net_device *netdev, void *p)
2662{
2663 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002664 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002665 struct sockaddr *addr = p;
2666
2667 if (!is_valid_ether_addr(addr->sa_data))
2668 return -EADDRNOTAVAIL;
2669
2670 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002671 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002672
Alexander Duyck26ad9172009-10-05 06:32:49 +00002673 /* set the correct pool for the new PF MAC address in entry 0 */
2674 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2675 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002676
Auke Kok9d5c8242008-01-24 02:22:38 -08002677 return 0;
2678}
2679
2680/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002681 * igb_write_mc_addr_list - write multicast addresses to MTA
2682 * @netdev: network interface device structure
2683 *
2684 * Writes multicast address list to the MTA hash table.
2685 * Returns: -ENOMEM on failure
2686 * 0 on no addresses written
2687 * X on writing X addresses to MTA
2688 **/
2689static int igb_write_mc_addr_list(struct net_device *netdev)
2690{
2691 struct igb_adapter *adapter = netdev_priv(netdev);
2692 struct e1000_hw *hw = &adapter->hw;
2693 struct dev_mc_list *mc_ptr = netdev->mc_list;
2694 u8 *mta_list;
2695 u32 vmolr = 0;
2696 int i;
2697
2698 if (!netdev->mc_count) {
2699 /* nothing to program, so clear mc list */
2700 igb_update_mc_addr_list(hw, NULL, 0);
2701 igb_restore_vf_multicasts(adapter);
2702 return 0;
2703 }
2704
2705	mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2706 if (!mta_list)
2707 return -ENOMEM;
2708
2709 /* set vmolr receive overflow multicast bit */
2710 vmolr |= E1000_VMOLR_ROMPE;
2711
2712 /* The shared function expects a packed array of only addresses. */
2713 mc_ptr = netdev->mc_list;
2714
2715 for (i = 0; i < netdev->mc_count; i++) {
2716 if (!mc_ptr)
2717 break;
2718 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2719 mc_ptr = mc_ptr->next;
2720 }
2721 igb_update_mc_addr_list(hw, mta_list, i);
2722 kfree(mta_list);
2723
2724 return netdev->mc_count;
2725}
2726
2727/**
2728 * igb_write_uc_addr_list - write unicast addresses to RAR table
2729 * @netdev: network interface device structure
2730 *
2731 * Writes unicast address list to the RAR table.
2732 * Returns: -ENOMEM on failure/insufficient address space
2733 * 0 on no addresses written
2734 * X on writing X addresses to the RAR table
2735 **/
2736static int igb_write_uc_addr_list(struct net_device *netdev)
2737{
2738 struct igb_adapter *adapter = netdev_priv(netdev);
2739 struct e1000_hw *hw = &adapter->hw;
2740 unsigned int vfn = adapter->vfs_allocated_count;
2741 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2742 int count = 0;
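	/*
	 * Editor's note: one RAR entry is reserved per allocated VF plus
	 * entry 0 for the PF MAC. On an 82576 with 24 RAR entries (editor's
	 * assumption) and 7 VFs, 16 entries remain for unicast addresses.
	 */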
2743
2744 /* return ENOMEM indicating insufficient memory for addresses */
2745 if (netdev->uc.count > rar_entries)
2746 return -ENOMEM;
2747
2748 if (netdev->uc.count && rar_entries) {
2749 struct netdev_hw_addr *ha;
2750 list_for_each_entry(ha, &netdev->uc.list, list) {
2751 if (!rar_entries)
2752 break;
2753 igb_rar_set_qsel(adapter, ha->addr,
2754 rar_entries--,
2755 vfn);
2756 count++;
2757 }
2758 }
2759 /* write the addresses in reverse order to avoid write combining */
2760 for (; rar_entries > 0 ; rar_entries--) {
2761 wr32(E1000_RAH(rar_entries), 0);
2762 wr32(E1000_RAL(rar_entries), 0);
2763 }
2764 wrfl();
2765
2766 return count;
2767}

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

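	/*
	 * Rough map of the flag handling below:
	 *   IFF_PROMISC  -> RCTL.UPE|MPE, VMOLR.ROPE|MPME
	 *   IFF_ALLMULTI -> RCTL.MPE,     VMOLR.MPME
	 *   otherwise    -> program the MTA/RAR tables, falling back to
	 *                   the promiscuous bits if either table overflows
	 */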
	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to
	 * set the VMOLR to enable the appropriate modes.  Without this
	 * workaround we will have issues with VLAN tag stripping not being
	 * done for frames that are only arriving because we are the default
	 * pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

/*
 * Need to wait a few seconds after link up to get diagnostic information
 * from the phy.
 */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
	                                           struct igb_adapter,
	                                           watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
			                                 &adapter->link_speed,
			                                 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) && (ctrl &
			       E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
			       E1000_CTRL_RFCE) ? "RX" : ((ctrl &
			       E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
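		/*
		 * Each q_vector owns a distinct bit in the EIMS/EICS
		 * space; OR-ing them together lets a single EICS write
		 * fire a software interrupt for every active queue pair.
		 */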
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Force detection of hung controller every watchdog period */
	tx_ring->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - an ITR value of 976 (~250 usecs).
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
		struct igb_ring *ring = q_vector->rx_ring;
		avg_wire_size = ring->total_bytes / ring->total_packets;
	}

	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
		struct igb_ring *ring = q_vector->tx_ring;
		avg_wire_size = max_t(u32, avg_wire_size,
		                      (ring->total_bytes /
		                       ring->total_packets));
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;
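	/*
	 * Worked example (assuming the ~0.25 usec per ITR tick implied by
	 * the "aka" comments in igb_set_itr): a steady stream of
	 * 1500-byte frames gives avg_wire_size = 1524 after the +24
	 * adjustment, which is above the mid-size window, so
	 * new_val = 1524 / 2 = 762, i.e. roughly 195 usecs between
	 * interrupts (~5000 ints/sec).
	 */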

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
                                   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

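	/*
	 * Example steps out of the low_latency state: 40 packets of
	 * ~300 bytes (12000 bytes total) looks latency-sensitive and
	 * steps up to lowest_latency, while 2 packets of 9000 bytes
	 * (bytes/packets > 8000, i.e. TSO-like) steps down to
	 * bulk_latency.
	 */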
	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
	                                 adapter->rx_itr,
	                                 adapter->rx_ring->total_packets,
	                                 adapter->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
	                                 adapter->tx_itr,
	                                 adapter->tx_ring->total_packets,
	                                 adapter->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	adapter->tx_ring->total_bytes = 0;
	adapter->tx_ring->total_packets = 0;

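	/*
	 * Note on the bias logic below: when new_itr is larger than the
	 * current itr_val, (new * old) / (new + old / 4) always works out
	 * below new_itr, so the max() appears to resolve to new_itr and
	 * the intermediate stepping may not take effect in this form.
	 */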
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
		          max((new_itr * q_vector->itr_val) /
		              (new_itr + (q_vector->itr_val >> 2)),
		              new_itr) :
		          new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

#define IGB_TX_FLAGS_CSUM	0x00000001
#define IGB_TX_FLAGS_VLAN	0x00000002
#define IGB_TX_FLAGS_TSO	0x00000004
#define IGB_TX_FLAGS_IPV4	0x00000008
#define IGB_TX_FLAGS_TSTAMP	0x00000010
#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT	16
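/* when IGB_TX_FLAGS_VLAN is set, the upper 16 bits of tx_flags (see the
 * VLAN_MASK/VLAN_SHIFT pair above) carry the 802.1q tag itself */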

static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP,
		                                         0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

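	/*
	 * At this point *hdr_len covers the MAC, IP and TCP headers, i.e.
	 * the bytes the hardware replays in front of every segment carved
	 * out of the TSO payload.
	 */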
	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
                                   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct pci_dev *pdev = tx_ring->pdev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
				    (const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&pdev->dev,
					         "partial checksum but proto=%x!\n",
					         skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

#define IGB_MAX_TXD_PWR		16
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)

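/* i.e. a single data descriptor can carry at most 64KB; each mapping
 * produced by skb_dma_map() below must stay under that limit, which is
 * what the BUG_ON checks in igb_tx_map_adv() assert */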
static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                                 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = tx_ring->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = skb_shinfo(skb)->dma_head;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

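	/* count only tracked the page fragments; + 1 accounts for the
	 * linear dma_head mapping set up before the loop */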
	return count + 1;
}

static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    int tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
	                 IGB_TX_FLAGS_TSO |
	                 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}

static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
                                    struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	union skb_shared_tx *shtx = skb_tx(skb);

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);

	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}

static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
                                      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

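	/*
	 * e.g. the default 1500-byte MTU gives max_frame = 1518, which
	 * lands in the MAXIMUM_ETHERNET_VLAN_SIZE (1522) bucket; jumbo
	 * frames drop to the small IGB_RXBUFFER_128 buffer and presumably
	 * lean on the page-based half of the receive path for the rest.
	 */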
	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
	         netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	if (hw->mac.type != e1000_82575) {
		u32 rqdpc_tmp;
		u64 rqdpc_total = 0;
		int i;
		/* Read out drop stats per RX queue.  Notice RQDPC (Receive
		 * Queue Drop Packet Count) stats only get incremented if
		 * the DROP_EN bit is set (in the SRRCTL register for that
		 * queue).  If the DROP_EN bit is NOT set, then a somewhat
		 * equivalent count is stored in RNBC (not on a per queue
		 * basis).  Also note the drop count is due to lack of
		 * available descriptors.
		 */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
			adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
			rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
		}
		netdev->stats.rx_fifo_errors = rqdpc_total;
	}
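	/* RQDPC is only a 12-bit field, hence the 0xFFF mask above; the
	 * full per-queue totals are accumulated in software instead. */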

	/* Note RNBC (Receive No Buffers Count) is not an exact
	 * drop count as the hardware FIFO might save the day.  That's
	 * one of the reasons for saving it in rx_fifo_errors, as it's
	 * potentially not a true drop.
	 */
	netdev->stats.rx_fifo_errors += adapter->stats.rnbc;

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
	                                 adapter->stats.roc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	netdev->stats.tx_errors = adapter->stats.ecol +
	                          adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (q_vector->itr_shift)
		itr_val |= itr_val << q_vector->itr_shift;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

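	/*
	 * Sketch of what follows: the current CPU's DCA tag is written
	 * into the per-queue TXCTRL/RXCTRL registers so the chipset can
	 * steer descriptor (and, for rx, header/payload) writes toward
	 * that CPU's cache.
	 */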
	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004090
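/* Ping every VF mailbox with a PF control message; the CTS bit is added
 * only for VFs that have completed their reset handshake and are clear
 * to talk to the PF.
 */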
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].clear_to_send)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* VFs are limited to using the MTA hash table for their multicast
	 * addresses */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		vf_data = &adapter->vf_data[i];
		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
	}
}

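/* Remove this VF from every VLVF pool it is a member of; once a pool is
 * empty, the VLAN ID is dropped from the VFTA filter table as well.
 */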
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta;
		 * read the vid out before clearing the register,
		 * otherwise VLAN 0 would always be removed instead */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* It is an error to call this function when VFs are not enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table, if bit already set
				 * PF must have added it outside of table */
				if (igb_vfta_set(hw, vid, true))
					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
						adapter->vfs_allocated_count);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;

			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

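			/* The first VLAN enabled on this VF grows its
			 * VMOLR.RLPML limit by 4 bytes to leave room
			 * for the 802.1Q tag. */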
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			adapter->vf_data[vf].vlans_enabled++;

			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

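/* Return a VF to its post-reset state: mailbox closed, offloads back to
 * defaults, and its VLAN and multicast filters cleared.
 */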
static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	/* disable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = false;

	/* reset offloads to defaults */
	igb_set_vmolr(hw, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

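/* Complete the reset handshake for a VF: restore defaults, program the VF
 * MAC address into a receive address register taken from the top of the
 * RAR table, re-enable VF TX/RX, and ACK the reset with the MAC address.
 */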
static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	/* enable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vf_data[vf].clear_to_send)
		igb_write_mbx(hw, &msg, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf)) {
			adapter->vf_data[vf].clear_to_send = false;
			igb_vf_reset_event(adapter, vf);
		}

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 mbx_size = E1000_VFMAILBOX_SIZE;
	u32 msgbuf[mbx_size];
	struct e1000_hw *hw = &adapter->hw;
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval)
		dev_err(&adapter->pdev->dev,
		        "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return retval;
	}

	if (!adapter->vf_data[vf].clear_to_send) {
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
		igb_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch (msgbuf[0] & 0xFFFF) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

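/* Re-arm interrupts for a queue vector once polling is done: refresh the
 * ITR when dynamic moderation is enabled, then unmask the vector via EIMS
 * (MSI-X) or the shared interrupt enable path.
 */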
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

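	/* Each transmitted frame ends at the descriptor recorded in
	 * next_to_watch (eop).  Reclaim whole frames until we reach one the
	 * hardware has not written the DD status back for, or until a full
	 * ring's worth of descriptors has been scanned.
	 */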
	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}

/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}

static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
                                   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	            E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}

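/* Reclaim completed receive descriptors.  With packet split enabled, the
 * header lands in the skb's linear data and any remainder arrives in a
 * half-page fragment; frames that span multiple descriptors (no EOP bit)
 * are stitched back together across loop iterations.
 */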
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 rx_ring->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;

			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: pointer to the ring to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
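				/* reuse the page: flip to the half of the
				 * page that was not handed to hardware the
				 * last time this slot was filled */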
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(rx_ring->pdev,
			                                  skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl -
 * @netdev:
 * @ifreq:
 * @cmd:
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev:
 * @ifreq:
 * @cmd:
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
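		/* fall through */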
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl || tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * igb_ioctl -
 * @netdev:
 * @ifreq:
 * @cmd:
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

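/* Access a word in the device's PCIe capability structure through PCI
 * configuration space; reg is an offset from the start of the capability.
 */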
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove vid from vlvf if sr-iov is enabled,
	 * if not in vlvf remove from vfta */
	if (igb_vlvf_set(adapter, vid, false, pf_id))
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

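/* Map an ethtool speed/duplex request onto forced MAC settings; 1000 Mbps
 * is only supported through autonegotiation, so that case re-enables
 * autoneg rather than forcing the link.
 */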
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

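/**
 * __igb_shutdown - common suspend/shutdown preparation
 * @pdev: Pointer to PCI device
 * @enable_wake: set on return when wake-up should stay armed
 *
 * Stops the interface, programs the wake-up filter registers from the
 * configured WoL options, hands the hardware back to firmware and
 * disables the PCI device.
 */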
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
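/**
 * igb_suspend - PCI suspend entry point
 * @pdev: Pointer to PCI device
 * @state: power state being entered (unused here)
 *
 * Runs the common shutdown path, then either prepares the device to
 * wake the system or puts it straight into D3hot.
 */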
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

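/**
 * igb_resume - PCI resume entry point
 * @pdev: Pointer to PCI device
 *
 * Restores PCI state, re-initializes the interrupt scheme, resets the
 * hardware and brings the interface back up if it was running.
 */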
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

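/**
 * igb_shutdown - PCI shutdown/reboot entry point
 * @pdev: Pointer to PCI device
 *
 * On power-off, leaves wake-up armed or disarmed as decided by
 * __igb_shutdown and drops the device into D3hot.
 */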
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

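/**
 * igb_rar_set_qsel - program a receive address register
 * @adapter: board private structure
 * @addr: MAC address, in network (big endian) byte order
 * @index: receive address register to write
 * @qsel: pool/queue to associate with the address
 */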
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

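/**
 * igb_set_vf_mac - assign a MAC address to a virtual function
 * @adapter: board private structure
 * @vf: VF number
 * @mac_addr: address to assign
 *
 * Records the address in the per-VF data and programs it into the RAR
 * entry reserved for that VF, tagged with the VF's pool.
 */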
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move toward the first; as a result a collision should not be
	 * possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

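/**
 * igb_vmm_control - configure VMDq loopback and replication
 * @adapter: board private structure
 *
 * No-op on the 82575, which lacks replication support.  Enables VLAN
 * tag stripping for replicated packets and switches PF loopback and
 * replication on or off depending on whether any VFs are allocated.
 */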
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */