/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

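	/* SYSTIM is exposed as a 64-bit register pair; assemble the low
	 * and high halves into one raw cycle count.  shift is zero for
	 * these parts, so SYSTIML fills bits 0-31 and SYSTIMH bits 32-63. */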
	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

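/* Map a 0-based queue index onto the interleaved 82576 register layout:
 * even indices count up from 0 and odd indices count up from 8, i.e.
 * 0->0, 1->8, 2->1, 3->9, ... */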
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
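		/* IVAR0 layout, one 32-bit entry per table index:
		 *   byte 0: RX queue (index)    byte 1: TX queue (index)
		 *   byte 2: RX queue (index+8)  byte 3: TX queue (index+8) */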
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  &igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
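	/* e.g. on a 4-CPU system this requests 4 RX + 4 TX + 1 other = 9
	 * MSI-X vectors; if the request fails below we fall back to a
	 * single vector serving one RX and one TX queue */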
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
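		/* fewer vectors than rings: pair TX ring i with RX ring i
		 * on one vector while both exist, then give each leftover
		 * ring its own vector */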
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}


	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
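		/* tearing the MSI-X scheme down rebuilds a single-queue,
		 * single-vector layout before plain MSI is retried */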
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 |
			      E1000_EICR_TX_QUEUE0 |
			      E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
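		/* mask all queue vectors via EIMC and drop them from the
		 * EIAM auto-mask and EIAC auto-clear sets */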
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
		                 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

982 * igb_release_hw_control - release control of the h/w to f/w
983 * @adapter: address of board private structure
984 *
985 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
986 * For ASF and Pass Through versions of f/w this means that the
987 * driver is no longer loaded.
988 *
989 **/
990static void igb_release_hw_control(struct igb_adapter *adapter)
991{
992 struct e1000_hw *hw = &adapter->hw;
993 u32 ctrl_ext;
994
995 /* Let firmware take over control of h/w */
996 ctrl_ext = rd32(E1000_CTRL_EXT);
997 wr32(E1000_CTRL_EXT,
998 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
999}
1000
1001
1002/**
1003 * igb_get_hw_control - get control of the h/w from f/w
1004 * @adapter: address of board private structure
1005 *
1006 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1007 * For ASF and Pass Through versions of f/w this means that
1008 * the driver is loaded.
1009 *
1010 **/
1011static void igb_get_hw_control(struct igb_adapter *adapter)
1012{
1013 struct e1000_hw *hw = &adapter->hw;
1014 u32 ctrl_ext;
1015
1016 /* Let firmware know the driver has taken over */
1017 ctrl_ext = rd32(E1000_CTRL_EXT);
1018 wr32(E1000_CTRL_EXT,
1019 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1020}
1021
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}


/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset */
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
		                sizeof(union e1000_adv_tx_desc) -
		                ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
	          ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags = 0;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

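	/* prefer a 64-bit DMA mask; fall back to 32-bit (with the kernel
	 * bounce-buffering where needed) if the 64-bit mask cannot be
	 * satisfied */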
	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
		         "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001437 netdev->features |= NETIF_F_TSO6;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001438
Herbert Xu5c0999b2009-01-19 15:20:57 -08001439 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001440
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001441 netdev->vlan_features |= NETIF_F_TSO;
1442 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001443 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001444 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001445 netdev->vlan_features |= NETIF_F_SG;
1446
Auke Kok9d5c8242008-01-24 02:22:38 -08001447 if (pci_using_dac)
1448 netdev->features |= NETIF_F_HIGHDMA;
1449
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001450 if (adapter->hw.mac.type == e1000_82576)
1451 netdev->features |= NETIF_F_SCTP_CSUM;
1452
Auke Kok9d5c8242008-01-24 02:22:38 -08001453 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1454
1455 /* before reading the NVM, reset the controller to put the device in a
1456 * known good starting state */
1457 hw->mac.ops.reset_hw(hw);
1458
1459 /* make sure the NVM is good */
1460 if (igb_validate_nvm_checksum(hw) < 0) {
1461 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1462 err = -EIO;
1463 goto err_eeprom;
1464 }
1465
1466 /* copy the MAC address out of the NVM */
1467 if (hw->mac.ops.read_mac_addr(hw))
1468 dev_err(&pdev->dev, "NVM Read Error\n");
1469
1470 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1471 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1472
1473 if (!is_valid_ether_addr(netdev->perm_addr)) {
1474 dev_err(&pdev->dev, "Invalid MAC Address\n");
1475 err = -EIO;
1476 goto err_eeprom;
1477 }
1478
Alexander Duyck0e340482009-03-20 00:17:08 +00001479 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1480 (unsigned long) adapter);
1481 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1482 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001483
1484 INIT_WORK(&adapter->reset_task, igb_reset_task);
1485 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1486
Alexander Duyck450c87c2009-02-06 23:22:11 +00001487 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001488 adapter->fc_autoneg = true;
1489 hw->mac.autoneg = true;
1490 hw->phy.autoneg_advertised = 0x2f;
1491
Alexander Duyck0cce1192009-07-23 18:10:24 +00001492 hw->fc.requested_mode = e1000_fc_default;
1493 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001494
Auke Kok9d5c8242008-01-24 02:22:38 -08001495 igb_validate_mdi_setting(hw);
1496
Auke Kok9d5c8242008-01-24 02:22:38 -08001497	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
1498 * enable the ACPI Magic Packet filter
1499 */
1500
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001501 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001502 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001503 else if (hw->bus.func == 1)
1504 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001505
1506 if (eeprom_data & eeprom_apme_mask)
1507 adapter->eeprom_wol |= E1000_WUFC_MAG;
1508
1509 /* now that we have the eeprom settings, apply the special cases where
1510 * the eeprom may be wrong or the board simply won't support wake on
1511 * lan on a particular port */
1512 switch (pdev->device) {
1513 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1514 adapter->eeprom_wol = 0;
1515 break;
1516 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001517 case E1000_DEV_ID_82576_FIBER:
1518 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001519 /* Wake events only supported on port A for dual fiber
1520 * regardless of eeprom setting */
1521 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1522 adapter->eeprom_wol = 0;
1523 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001524 case E1000_DEV_ID_82576_QUAD_COPPER:
1525 /* if quad port adapter, disable WoL on all but port A */
1526 if (global_quad_port_a != 0)
1527 adapter->eeprom_wol = 0;
1528 else
1529 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1530 /* Reset for multiple quad port adapters */
1531 if (++global_quad_port_a == 4)
1532 global_quad_port_a = 0;
1533 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001534 }
1535
1536 /* initialize the wol settings based on the eeprom settings */
1537 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001538 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001539
1540 /* reset the hardware with the new settings */
1541 igb_reset(adapter);
1542
1543 /* let the f/w know that the h/w is now under the control of the
1544 * driver. */
1545 igb_get_hw_control(adapter);
1546
Auke Kok9d5c8242008-01-24 02:22:38 -08001547 strcpy(netdev->name, "eth%d");
1548 err = register_netdev(netdev);
1549 if (err)
1550 goto err_register;
1551
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001552 /* carrier off reporting is important to ethtool even BEFORE open */
1553 netif_carrier_off(netdev);
1554
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001555#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001556 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001557 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001558 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001559 igb_setup_dca(adapter);
1560 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001561
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001562#endif
1563
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001564 switch (hw->mac.type) {
1565 case e1000_82576:
1566 /*
1567 * Initialize hardware timer: we keep it running just in case
1568 * that some program needs it later on.
1569 */
1570 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1571 adapter->cycles.read = igb_read_clock;
1572 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1573 adapter->cycles.mult = 1;
1574	/*
1575 * Scale the NIC clock cycle by a large factor so that
1576 * relatively small clock corrections can be added or
1577	 * subtracted at each clock tick. The drawbacks of a large
1578 * factor are a) that the clock register overflows more quickly
1579 * (not such a big deal) and b) that the increment per tick has
1580 * to fit into 24 bits. As a result we need to use a shift of
1581 * 19 so we can fit a value of 16 into the TIMINCA register.
1582 */
1583 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1584 wr32(E1000_TIMINCA,
1585 (1 << E1000_TIMINCA_16NS_SHIFT) |
1586 (16 << IGB_82576_TSYNC_SHIFT));
Patrick Ohly38c845c2009-02-12 05:03:41 +00001587
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001588 /* Set registers so that rollover occurs soon to test this. */
1589 wr32(E1000_SYSTIML, 0x00000000);
1590 wr32(E1000_SYSTIMH, 0xFF800000);
1591 wrfl();
Patrick Ohly33af6bc2009-02-12 05:03:43 +00001592
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001593 timecounter_init(&adapter->clock,
1594 &adapter->cycles,
1595 ktime_to_ns(ktime_get_real()));
1596 /*
1597 * Synchronize our NIC clock against system wall clock. NIC
1598	 * time stamp reading requires ~3us per sample; each sample
1599	 * proved stable even under load, so only 10 samples are
1600	 * required for each offset comparison.
1601 */
1602 memset(&adapter->compare, 0, sizeof(adapter->compare));
1603 adapter->compare.source = &adapter->clock;
1604 adapter->compare.target = ktime_get_real;
1605 adapter->compare.num_samples = 10;
1606 timecompare_update(&adapter->compare, 0);
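		/* the initial update seeds the NIC/system clock offset so
		 * that later comparisons start from a calibrated baseline */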
1607 break;
1608 case e1000_82575:
1609 /* 82575 does not support timesync */
1610 default:
1611 break;
Patrick Ohly38c845c2009-02-12 05:03:41 +00001612 }
Patrick Ohly38c845c2009-02-12 05:03:41 +00001613
Auke Kok9d5c8242008-01-24 02:22:38 -08001614 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1615 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001616 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001617 netdev->name,
1618 ((hw->bus.speed == e1000_bus_speed_2500)
1619 ? "2.5Gb/s" : "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001620 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1621 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1622 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1623 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001624 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001625
1626 igb_read_part_num(hw, &part_num);
1627 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1628 (part_num >> 8), (part_num & 0xff));
1629
1630 dev_info(&pdev->dev,
1631 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1632 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001633 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001634 adapter->num_rx_queues, adapter->num_tx_queues);
1635
Auke Kok9d5c8242008-01-24 02:22:38 -08001636 return 0;
1637
1638err_register:
1639 igb_release_hw_control(adapter);
1640err_eeprom:
1641 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001642 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001643
1644 if (hw->flash_address)
1645 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001646err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001647 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001648 iounmap(hw->hw_addr);
1649err_ioremap:
1650 free_netdev(netdev);
1651err_alloc_etherdev:
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001652 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1653 IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001654err_pci_reg:
1655err_dma:
1656 pci_disable_device(pdev);
1657 return err;
1658}
1659
1660/**
1661 * igb_remove - Device Removal Routine
1662 * @pdev: PCI device information struct
1663 *
1664 * igb_remove is called by the PCI subsystem to alert the driver
1665	 * that it should release a PCI device. This could be caused by a
1666 * Hot-Plug event, or because the driver is going to be removed from
1667 * memory.
1668 **/
1669static void __devexit igb_remove(struct pci_dev *pdev)
1670{
1671 struct net_device *netdev = pci_get_drvdata(pdev);
1672 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001673 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001674
1675	/* flush_scheduled_work() may reschedule our watchdog task, so
1676 * explicitly disable watchdog tasks from being rescheduled */
1677 set_bit(__IGB_DOWN, &adapter->state);
1678 del_timer_sync(&adapter->watchdog_timer);
1679 del_timer_sync(&adapter->phy_info_timer);
1680
1681 flush_scheduled_work();
1682
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001683#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001684 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001685 dev_info(&pdev->dev, "DCA disabled\n");
1686 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001687 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001688 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001689 }
1690#endif
1691
Auke Kok9d5c8242008-01-24 02:22:38 -08001692 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1693 * would have already happened in close and is redundant. */
1694 igb_release_hw_control(adapter);
1695
1696 unregister_netdev(netdev);
1697
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001698 if (!igb_check_reset_block(&adapter->hw))
1699 igb_reset_phy(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001700
Alexander Duyck047e0032009-10-27 15:49:27 +00001701 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001702
Alexander Duyck37680112009-02-19 20:40:30 -08001703#ifdef CONFIG_PCI_IOV
1704 /* reclaim resources allocated to VFs */
1705 if (adapter->vf_data) {
1706 /* disable iov and allow time for transactions to clear */
1707 pci_disable_sriov(pdev);
1708 msleep(500);
1709
1710 kfree(adapter->vf_data);
1711 adapter->vf_data = NULL;
1712 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1713 msleep(100);
1714 dev_info(&pdev->dev, "IOV Disabled\n");
1715 }
1716#endif
Alexander Duyck28b07592009-02-06 23:20:31 +00001717 iounmap(hw->hw_addr);
1718 if (hw->flash_address)
1719 iounmap(hw->flash_address);
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001720 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1721 IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001722
1723 free_netdev(netdev);
1724
Frans Pop19d5afd2009-10-02 10:04:12 -07001725 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001726
Auke Kok9d5c8242008-01-24 02:22:38 -08001727 pci_disable_device(pdev);
1728}
1729
1730/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00001731 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1732 * @adapter: board private structure to initialize
1733 *
1734 * This function initializes the vf specific data storage and then attempts to
1735	 * allocate the VFs. This ordering is used because it is much
1736	 * more expensive time-wise to disable SR-IOV than it is to allocate and free
1737 * the memory for the VFs.
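 * Note: the interleaved #ifdef CONFIG_PCI_IOV blocks below mean that with
 * SR-IOV disabled this body reduces to clearing vfs_allocated_count.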
1738 **/
1739static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1740{
1741#ifdef CONFIG_PCI_IOV
1742 struct pci_dev *pdev = adapter->pdev;
1743
1744 if (adapter->vfs_allocated_count > 7)
1745 adapter->vfs_allocated_count = 7;
1746
1747 if (adapter->vfs_allocated_count) {
1748 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1749 sizeof(struct vf_data_storage),
1750 GFP_KERNEL);
1751 /* if allocation failed then we do not support SR-IOV */
1752 if (!adapter->vf_data) {
1753 adapter->vfs_allocated_count = 0;
1754 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1755 "Data Storage\n");
1756 }
1757 }
1758
1759 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1760 kfree(adapter->vf_data);
1761 adapter->vf_data = NULL;
1762#endif /* CONFIG_PCI_IOV */
1763 adapter->vfs_allocated_count = 0;
1764#ifdef CONFIG_PCI_IOV
1765 } else {
1766 unsigned char mac_addr[ETH_ALEN];
1767 int i;
1768 dev_info(&pdev->dev, "%d vfs allocated\n",
1769 adapter->vfs_allocated_count);
1770 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1771 random_ether_addr(mac_addr);
1772 igb_set_vf_mac(adapter, i, mac_addr);
1773 }
1774 }
1775#endif /* CONFIG_PCI_IOV */
1776}
1777
1778/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001779 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1780 * @adapter: board private structure to initialize
1781 *
1782 * igb_sw_init initializes the Adapter private data structure.
1783 * Fields are initialized based on PCI device information and
1784 * OS network device settings (MTU size).
1785 **/
1786static int __devinit igb_sw_init(struct igb_adapter *adapter)
1787{
1788 struct e1000_hw *hw = &adapter->hw;
1789 struct net_device *netdev = adapter->netdev;
1790 struct pci_dev *pdev = adapter->pdev;
1791
1792 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1793
Alexander Duyck68fd9912008-11-20 00:48:10 -08001794 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1795 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001796 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1797 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1798
Auke Kok9d5c8242008-01-24 02:22:38 -08001799 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1800 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1801
Alexander Duycka6b623e2009-10-27 23:47:53 +00001802#ifdef CONFIG_PCI_IOV
1803 if (hw->mac.type == e1000_82576)
1804 adapter->vfs_allocated_count = max_vfs;
1805
1806#endif /* CONFIG_PCI_IOV */
1807 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00001808 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001809 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1810 return -ENOMEM;
1811 }
1812
Alexander Duycka6b623e2009-10-27 23:47:53 +00001813 igb_probe_vfs(adapter);
1814
Auke Kok9d5c8242008-01-24 02:22:38 -08001815 /* Explicitly disable IRQ since the NIC can be in any state. */
1816 igb_irq_disable(adapter);
1817
1818 set_bit(__IGB_DOWN, &adapter->state);
1819 return 0;
1820}
1821
1822/**
1823 * igb_open - Called when a network interface is made active
1824 * @netdev: network interface device structure
1825 *
1826 * Returns 0 on success, negative value on failure
1827 *
1828 * The open entry point is called when a network interface is made
1829 * active by the system (IFF_UP). At this point all resources needed
1830 * for transmit and receive operations are allocated, the interrupt
1831 * handler is registered with the OS, the watchdog timer is started,
1832 * and the stack is notified that the interface is ready.
1833 **/
1834static int igb_open(struct net_device *netdev)
1835{
1836 struct igb_adapter *adapter = netdev_priv(netdev);
1837 struct e1000_hw *hw = &adapter->hw;
1838 int err;
1839 int i;
1840
1841 /* disallow open during test */
1842 if (test_bit(__IGB_TESTING, &adapter->state))
1843 return -EBUSY;
1844
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001845 netif_carrier_off(netdev);
1846
Auke Kok9d5c8242008-01-24 02:22:38 -08001847 /* allocate transmit descriptors */
1848 err = igb_setup_all_tx_resources(adapter);
1849 if (err)
1850 goto err_setup_tx;
1851
1852 /* allocate receive descriptors */
1853 err = igb_setup_all_rx_resources(adapter);
1854 if (err)
1855 goto err_setup_rx;
1856
1857 /* e1000_power_up_phy(adapter); */
1858
Auke Kok9d5c8242008-01-24 02:22:38 -08001859 /* before we allocate an interrupt, we must be ready to handle it.
1860 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1861	 * as soon as we call pci_request_irq, so we have to set up our
1862 * clean_rx handler before we do so. */
1863 igb_configure(adapter);
1864
1865 err = igb_request_irq(adapter);
1866 if (err)
1867 goto err_req_irq;
1868
1869 /* From here on the code is the same as igb_up() */
1870 clear_bit(__IGB_DOWN, &adapter->state);
1871
Alexander Duyck047e0032009-10-27 15:49:27 +00001872 for (i = 0; i < adapter->num_q_vectors; i++) {
1873 struct igb_q_vector *q_vector = adapter->q_vector[i];
1874 napi_enable(&q_vector->napi);
1875 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001876
1877 /* Clear any pending interrupts. */
1878 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001879
1880 igb_irq_enable(adapter);
1881
Alexander Duyckd4960302009-10-27 15:53:45 +00001882 /* notify VFs that reset has been completed */
1883 if (adapter->vfs_allocated_count) {
1884 u32 reg_data = rd32(E1000_CTRL_EXT);
1885 reg_data |= E1000_CTRL_EXT_PFRSTD;
1886 wr32(E1000_CTRL_EXT, reg_data);
1887 }
1888
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07001889 netif_tx_start_all_queues(netdev);
1890
Auke Kok9d5c8242008-01-24 02:22:38 -08001891 /* Fire a link status change interrupt to start the watchdog. */
1892 wr32(E1000_ICS, E1000_ICS_LSC);
1893
1894 return 0;
1895
1896err_req_irq:
1897 igb_release_hw_control(adapter);
1898 /* e1000_power_down_phy(adapter); */
1899 igb_free_all_rx_resources(adapter);
1900err_setup_rx:
1901 igb_free_all_tx_resources(adapter);
1902err_setup_tx:
1903 igb_reset(adapter);
1904
1905 return err;
1906}
1907
1908/**
1909 * igb_close - Disables a network interface
1910 * @netdev: network interface device structure
1911 *
1912 * Returns 0, this is not allowed to fail
1913 *
1914 * The close entry point is called when an interface is de-activated
1915 * by the OS. The hardware is still under the driver's control, but
1916 * needs to be disabled. A global MAC reset is issued to stop the
1917 * hardware, and all transmit and receive resources are freed.
1918 **/
1919static int igb_close(struct net_device *netdev)
1920{
1921 struct igb_adapter *adapter = netdev_priv(netdev);
1922
1923 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1924 igb_down(adapter);
1925
1926 igb_free_irq(adapter);
1927
1928 igb_free_all_tx_resources(adapter);
1929 igb_free_all_rx_resources(adapter);
1930
Auke Kok9d5c8242008-01-24 02:22:38 -08001931 return 0;
1932}
1933
1934/**
1935 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08001936 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1937 *
1938 * Return 0 on success, negative on failure
1939 **/
Alexander Duyck80785292009-10-27 15:51:47 +00001940int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08001941{
Alexander Duyck80785292009-10-27 15:51:47 +00001942 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001943 int size;
1944
1945 size = sizeof(struct igb_buffer) * tx_ring->count;
1946 tx_ring->buffer_info = vmalloc(size);
1947 if (!tx_ring->buffer_info)
1948 goto err;
1949 memset(tx_ring->buffer_info, 0, size);
1950
1951 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08001952 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08001953 tx_ring->size = ALIGN(tx_ring->size, 4096);
1954
Alexander Duyck439705e2009-10-27 23:49:20 +00001955 tx_ring->desc = pci_alloc_consistent(pdev,
1956 tx_ring->size,
Auke Kok9d5c8242008-01-24 02:22:38 -08001957 &tx_ring->dma);
1958
1959 if (!tx_ring->desc)
1960 goto err;
1961
Auke Kok9d5c8242008-01-24 02:22:38 -08001962 tx_ring->next_to_use = 0;
1963 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08001964 return 0;
1965
1966err:
1967 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00001968 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08001969 "Unable to allocate memory for the transmit descriptor ring\n");
1970 return -ENOMEM;
1971}
1972
1973/**
1974 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
1975 * (Descriptors) for all queues
1976 * @adapter: board private structure
1977 *
1978 * Return 0 on success, negative on failure
1979 **/
1980static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1981{
Alexander Duyck439705e2009-10-27 23:49:20 +00001982 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001983 int i, err = 0;
1984
1985 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck80785292009-10-27 15:51:47 +00001986 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08001987 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00001988 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08001989 "Allocation for Tx Queue %u failed\n", i);
1990 for (i--; i >= 0; i--)
Mitch Williams3b644cf2008-06-27 10:59:48 -07001991 igb_free_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08001992 break;
1993 }
1994 }
1995
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07001996 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
Alexander Duyck439705e2009-10-27 23:49:20 +00001997 int r_idx = i % adapter->num_tx_queues;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07001998 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00001999 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002000 return err;
2001}
2002
2003/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002004 * igb_setup_tctl - configure the transmit control registers
2005 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002006 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002007void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002008{
Auke Kok9d5c8242008-01-24 02:22:38 -08002009 struct e1000_hw *hw = &adapter->hw;
2010 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002011
Alexander Duyck85b430b2009-10-27 15:50:29 +00002012 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2013 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002014
2015 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002016 tctl = rd32(E1000_TCTL);
2017 tctl &= ~E1000_TCTL_CT;
2018 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2019 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2020
2021 igb_config_collision_dist(hw);
2022
Auke Kok9d5c8242008-01-24 02:22:38 -08002023 /* Enable transmits */
2024 tctl |= E1000_TCTL_EN;
2025
2026 wr32(E1000_TCTL, tctl);
2027}
2028
2029/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002030 * igb_configure_tx_ring - Configure transmit ring after Reset
2031 * @adapter: board private structure
2032 * @ring: tx ring to configure
2033 *
2034 * Configure a transmit ring after a reset.
2035 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002036void igb_configure_tx_ring(struct igb_adapter *adapter,
2037 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002038{
2039 struct e1000_hw *hw = &adapter->hw;
2040 u32 txdctl;
2041 u64 tdba = ring->dma;
2042 int reg_idx = ring->reg_idx;
2043
2044 /* disable the queue */
2045 txdctl = rd32(E1000_TXDCTL(reg_idx));
2046 wr32(E1000_TXDCTL(reg_idx),
2047 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2048 wrfl();
2049 mdelay(10);
2050
2051 wr32(E1000_TDLEN(reg_idx),
2052 ring->count * sizeof(union e1000_adv_tx_desc));
2053 wr32(E1000_TDBAL(reg_idx),
2054 tdba & 0x00000000ffffffffULL);
2055 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2056
Alexander Duyckfce99e32009-10-27 15:51:27 +00002057 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2058 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2059 writel(0, ring->head);
2060 writel(0, ring->tail);
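	/* head and tail are cached as MMIO addresses so that the hot path
	 * can writel() them directly without recomputing register offsets */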
Alexander Duyck85b430b2009-10-27 15:50:29 +00002061
2062 txdctl |= IGB_TX_PTHRESH;
2063 txdctl |= IGB_TX_HTHRESH << 8;
2064 txdctl |= IGB_TX_WTHRESH << 16;
2065
2066 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2067 wr32(E1000_TXDCTL(reg_idx), txdctl);
2068}
2069
2070/**
2071 * igb_configure_tx - Configure transmit Unit after Reset
2072 * @adapter: board private structure
2073 *
2074 * Configure the Tx unit of the MAC after a reset.
2075 **/
2076static void igb_configure_tx(struct igb_adapter *adapter)
2077{
2078 int i;
2079
2080 for (i = 0; i < adapter->num_tx_queues; i++)
2081 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002082}
2083
2084/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002085 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002086 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2087 *
2088 * Returns 0 on success, negative on failure
2089 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002090int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002091{
Alexander Duyck80785292009-10-27 15:51:47 +00002092 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002093 int size, desc_len;
2094
2095 size = sizeof(struct igb_buffer) * rx_ring->count;
2096 rx_ring->buffer_info = vmalloc(size);
2097 if (!rx_ring->buffer_info)
2098 goto err;
2099 memset(rx_ring->buffer_info, 0, size);
2100
2101 desc_len = sizeof(union e1000_adv_rx_desc);
2102
2103 /* Round up to nearest 4K */
2104 rx_ring->size = rx_ring->count * desc_len;
2105 rx_ring->size = ALIGN(rx_ring->size, 4096);
2106
2107 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2108 &rx_ring->dma);
2109
2110 if (!rx_ring->desc)
2111 goto err;
2112
2113 rx_ring->next_to_clean = 0;
2114 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002115
Auke Kok9d5c8242008-01-24 02:22:38 -08002116 return 0;
2117
2118err:
2119 vfree(rx_ring->buffer_info);
Alexander Duyck439705e2009-10-27 23:49:20 +00002120 rx_ring->buffer_info = NULL;
Alexander Duyck80785292009-10-27 15:51:47 +00002121 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002122 "the receive descriptor ring\n");
2123 return -ENOMEM;
2124}
2125
2126/**
2127 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2128 * (Descriptors) for all queues
2129 * @adapter: board private structure
2130 *
2131 * Return 0 on success, negative on failure
2132 **/
2133static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2134{
Alexander Duyck439705e2009-10-27 23:49:20 +00002135 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002136 int i, err = 0;
2137
2138 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck80785292009-10-27 15:51:47 +00002139 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002140 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002141 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002142 "Allocation for Rx Queue %u failed\n", i);
2143 for (i--; i >= 0; i--)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002144 igb_free_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002145 break;
2146 }
2147 }
2148
2149 return err;
2150}
2151
2152/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002153 * igb_setup_mrqc - configure the multiple receive queue control registers
2154 * @adapter: Board private structure
2155 **/
2156static void igb_setup_mrqc(struct igb_adapter *adapter)
2157{
2158 struct e1000_hw *hw = &adapter->hw;
2159 u32 mrqc, rxcsum;
2160 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2161 union e1000_reta {
2162 u32 dword;
2163 u8 bytes[4];
2164 } reta;
2165 static const u8 rsshash[40] = {
2166 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2167 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2168 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2169 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2170
2171 /* Fill out hash function seeds */
2172 for (j = 0; j < 10; j++) {
2173 u32 rsskey = rsshash[(j * 4)];
2174 rsskey |= rsshash[(j * 4) + 1] << 8;
2175 rsskey |= rsshash[(j * 4) + 2] << 16;
2176 rsskey |= rsshash[(j * 4) + 3] << 24;
2177 array_wr32(E1000_RSSRK(0), j, rsskey);
2178 }
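	/* each RSSRK register written above holds four consecutive key bytes
	 * packed least-significant byte first, e.g. rsshash[0] lands in the
	 * low byte of RSSRK(0) */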
2179
2180 num_rx_queues = adapter->num_rx_queues;
2181
2182 if (adapter->vfs_allocated_count) {
2183		/* 82575 and 82576 support 2 RSS queues for VMDq */
2184 switch (hw->mac.type) {
2185 case e1000_82576:
2186 shift = 3;
2187 num_rx_queues = 2;
2188 break;
2189 case e1000_82575:
2190 shift = 2;
2191 shift2 = 6;
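			/* fall through */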
2192 default:
2193 break;
2194 }
2195 } else {
2196 if (hw->mac.type == e1000_82575)
2197 shift = 6;
2198 }
2199
2200 for (j = 0; j < (32 * 4); j++) {
2201 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2202 if (shift2)
2203 reta.bytes[j & 3] |= num_rx_queues << shift2;
2204 if ((j & 3) == 3)
2205 wr32(E1000_RETA(j >> 2), reta.dword);
2206 }
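	/* Illustrative result: with num_rx_queues == 4 and shift == 0 the
	 * 128-entry redirection table cycles 0,1,2,3,... so hashed flows
	 * spread evenly across the four queues. */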
2207
2208 /*
2209 * Disable raw packet checksumming so that RSS hash is placed in
2210 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2211 * offloads as they are enabled by default
2212 */
2213 rxcsum = rd32(E1000_RXCSUM);
2214 rxcsum |= E1000_RXCSUM_PCSD;
2215
2216 if (adapter->hw.mac.type >= e1000_82576)
2217 /* Enable Receive Checksum Offload for SCTP */
2218 rxcsum |= E1000_RXCSUM_CRCOFL;
2219
2220 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2221 wr32(E1000_RXCSUM, rxcsum);
2222
2223 /* If VMDq is enabled then we set the appropriate mode for that, else
2224 * we default to RSS so that an RSS hash is calculated per packet even
2225 * if we are only using one queue */
2226 if (adapter->vfs_allocated_count) {
2227 if (hw->mac.type > e1000_82575) {
2228 /* Set the default pool for the PF's first queue */
2229 u32 vtctl = rd32(E1000_VT_CTL);
2230 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2231 E1000_VT_CTL_DISABLE_DEF_POOL);
2232 vtctl |= adapter->vfs_allocated_count <<
2233 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2234 wr32(E1000_VT_CTL, vtctl);
2235 }
2236 if (adapter->num_rx_queues > 1)
2237 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2238 else
2239 mrqc = E1000_MRQC_ENABLE_VMDQ;
2240 } else {
2241 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2242 }
2243 igb_vmm_control(adapter);
2244
2245 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2246 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2247 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2248 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2249 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2250 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2251 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2252 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2253
2254 wr32(E1000_MRQC, mrqc);
2255}
2256
2257/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002258 * igb_setup_rctl - configure the receive control registers
2259 * @adapter: Board private structure
2260 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002261void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002262{
2263 struct e1000_hw *hw = &adapter->hw;
2264 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002265
2266 rctl = rd32(E1000_RCTL);
2267
2268 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002269 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002270
Alexander Duyck69d728b2008-11-25 01:04:03 -08002271 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002272 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002273
Auke Kok87cb7e82008-07-08 15:08:29 -07002274 /*
2275 * enable stripping of CRC. It's unlikely this will break BMC
2276 * redirection as it did with e1000. Newer features require
2277 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002278 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002279 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002280
Alexander Duyck9b07f3d32008-11-25 01:03:26 -08002281 /*
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002282 * disable store bad packets and clear size bits.
Alexander Duyck9b07f3d32008-11-25 01:03:26 -08002283 */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002284 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002285
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002286	/* enable LPE; the maximum accepted frame size is then enforced via RLPML */
2287 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002288
Alexander Duyck952f72a2009-10-27 15:51:07 +00002289 /* disable queue 0 to prevent tail write w/o re-config */
2290 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002291
Alexander Duycke1739522009-02-19 20:39:44 -08002292 /* Attention!!! For SR-IOV PF driver operations you must enable
2293 * queue drop for all VF and PF queues to prevent head of line blocking
2294	 * if an untrusted VF does not provide descriptors to hardware.
2295 */
2296 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002297 /* set all queue drop enable bits */
2298 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002299 }
2300
Auke Kok9d5c8242008-01-24 02:22:38 -08002301 wr32(E1000_RCTL, rctl);
2302}
2303
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002304static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2305 int vfn)
2306{
2307 struct e1000_hw *hw = &adapter->hw;
2308 u32 vmolr;
2309
2310	/* if this is a VF rather than the PF, check whether the VF has
2311	 * VLANs enabled and, if so, increase the size to allow for the tag */
2312 if (vfn < adapter->vfs_allocated_count &&
2313 adapter->vf_data[vfn].vlans_enabled)
2314 size += VLAN_TAG_SIZE;
2315
2316 vmolr = rd32(E1000_VMOLR(vfn));
2317 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2318 vmolr |= size | E1000_VMOLR_LPE;
2319 wr32(E1000_VMOLR(vfn), vmolr);
2320
2321 return 0;
2322}
2323
Auke Kok9d5c8242008-01-24 02:22:38 -08002324/**
Alexander Duycke1739522009-02-19 20:39:44 -08002325 * igb_rlpml_set - set maximum receive packet size
2326 * @adapter: board private structure
2327 *
2328 * Configure maximum receivable packet size.
2329 **/
2330static void igb_rlpml_set(struct igb_adapter *adapter)
2331{
2332 u32 max_frame_size = adapter->max_frame_size;
2333 struct e1000_hw *hw = &adapter->hw;
2334 u16 pf_id = adapter->vfs_allocated_count;
2335
2336 if (adapter->vlgrp)
2337 max_frame_size += VLAN_TAG_SIZE;
2338
2339 /* if vfs are enabled we set RLPML to the largest possible request
2340 * size and set the VMOLR RLPML to the size we need */
2341 if (pf_id) {
2342 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002343 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002344 }
2345
2346 wr32(E1000_RLPML, max_frame_size);
2347}
2348
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002349static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2350{
2351 struct e1000_hw *hw = &adapter->hw;
2352 u32 vmolr;
2353
2354 /*
2355	 * This register exists only on 82576 and newer, so on older
2356	 * hardware exit without doing anything
2357 */
2358 if (hw->mac.type < e1000_82576)
2359 return;
2360
2361 vmolr = rd32(E1000_VMOLR(vfn));
2362 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
2363 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2364
2365 /* clear all bits that might not be set */
2366 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2367
2368 if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
2369 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2370 /*
2371 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2372 * multicast packets
2373 */
2374 if (vfn <= adapter->vfs_allocated_count)
2375 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2376
2377 wr32(E1000_VMOLR(vfn), vmolr);
2378}
2379
Alexander Duycke1739522009-02-19 20:39:44 -08002380/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002381 * igb_configure_rx_ring - Configure a receive ring after Reset
2382 * @adapter: board private structure
2383 * @ring: receive ring to be configured
2384 *
2385 * Configure the Rx unit of the MAC after a reset.
2386 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002387void igb_configure_rx_ring(struct igb_adapter *adapter,
2388 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002389{
2390 struct e1000_hw *hw = &adapter->hw;
2391 u64 rdba = ring->dma;
2392 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002393 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002394
2395 /* disable the queue */
2396 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2397 wr32(E1000_RXDCTL(reg_idx),
2398 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2399
2400 /* Set DMA base address registers */
2401 wr32(E1000_RDBAL(reg_idx),
2402 rdba & 0x00000000ffffffffULL);
2403 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2404 wr32(E1000_RDLEN(reg_idx),
2405 ring->count * sizeof(union e1000_adv_rx_desc));
2406
2407 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002408 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2409 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2410 writel(0, ring->head);
2411 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002412
Alexander Duyck952f72a2009-10-27 15:51:07 +00002413 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002414 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2415 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002416 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2417#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2418 srrctl |= IGB_RXBUFFER_16384 >>
2419 E1000_SRRCTL_BSIZEPKT_SHIFT;
2420#else
2421 srrctl |= (PAGE_SIZE / 2) >>
2422 E1000_SRRCTL_BSIZEPKT_SHIFT;
2423#endif
2424 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2425 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002426 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002427 E1000_SRRCTL_BSIZEPKT_SHIFT;
2428 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2429 }
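	/* BSIZEPKT is programmed in 1 KB units (assuming the usual
	 * E1000_SRRCTL_BSIZEPKT_SHIFT of 10), so e.g. a 2 KB buffer
	 * encodes as 2 in either branch above. */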
2430
2431 wr32(E1000_SRRCTL(reg_idx), srrctl);
2432
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002433 /* set filtering for VMDQ pools */
2434 igb_set_vmolr(adapter, reg_idx & 0x7);
2435
Alexander Duyck85b430b2009-10-27 15:50:29 +00002436 /* enable receive descriptor fetching */
2437 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2438 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2439 rxdctl &= 0xFFF00000;
2440 rxdctl |= IGB_RX_PTHRESH;
2441 rxdctl |= IGB_RX_HTHRESH << 8;
2442 rxdctl |= IGB_RX_WTHRESH << 16;
2443 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2444}
2445
2446/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002447 * igb_configure_rx - Configure receive Unit after Reset
2448 * @adapter: board private structure
2449 *
2450 * Configure the Rx unit of the MAC after a reset.
2451 **/
2452static void igb_configure_rx(struct igb_adapter *adapter)
2453{
Hannes Eder91075842009-02-18 19:36:04 -08002454 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002455
Alexander Duyck68d480c2009-10-05 06:33:08 +00002456 /* set UTA to appropriate mode */
2457 igb_set_uta(adapter);
2458
Alexander Duyck26ad9172009-10-05 06:32:49 +00002459 /* set the correct pool for the PF default MAC address in entry 0 */
2460 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2461 adapter->vfs_allocated_count);
2462
Alexander Duyck06cf2662009-10-27 15:53:25 +00002463 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2464 * the Base and Length of the Rx Descriptor Ring */
2465 for (i = 0; i < adapter->num_rx_queues; i++)
2466 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002467}
2468
2469/**
2470 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002471 * @tx_ring: Tx descriptor ring for a specific queue
2472 *
2473 * Free all transmit software resources
2474 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002475void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002476{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002477 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002478
2479 vfree(tx_ring->buffer_info);
2480 tx_ring->buffer_info = NULL;
2481
Alexander Duyck439705e2009-10-27 23:49:20 +00002482 /* if not set, then don't free */
2483 if (!tx_ring->desc)
2484 return;
2485
Alexander Duyck80785292009-10-27 15:51:47 +00002486 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2487 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002488
2489 tx_ring->desc = NULL;
2490}
2491
2492/**
2493 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2494 * @adapter: board private structure
2495 *
2496 * Free all transmit software resources
2497 **/
2498static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2499{
2500 int i;
2501
2502 for (i = 0; i < adapter->num_tx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002503 igb_free_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002504}
2505
Alexander Duyckb1a436c2009-10-27 15:54:43 +00002506void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2507 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002508{
Alexander Duyck65689fe2009-03-20 00:17:43 +00002509 buffer_info->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002510 if (buffer_info->skb) {
Alexander Duyck80785292009-10-27 15:51:47 +00002511 skb_dma_unmap(&tx_ring->pdev->dev,
2512 buffer_info->skb,
Alexander Duyck65689fe2009-03-20 00:17:43 +00002513 DMA_TO_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002514 dev_kfree_skb_any(buffer_info->skb);
2515 buffer_info->skb = NULL;
2516 }
2517 buffer_info->time_stamp = 0;
2518 /* buffer_info must be completely set up in the transmit path */
2519}
2520
2521/**
2522 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002523 * @tx_ring: ring to be cleaned
2524 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002525static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002526{
2527 struct igb_buffer *buffer_info;
2528 unsigned long size;
2529 unsigned int i;
2530
2531 if (!tx_ring->buffer_info)
2532 return;
2533 /* Free all the Tx ring sk_buffs */
2534
2535 for (i = 0; i < tx_ring->count; i++) {
2536 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002537 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002538 }
2539
2540 size = sizeof(struct igb_buffer) * tx_ring->count;
2541 memset(tx_ring->buffer_info, 0, size);
2542
2543 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08002544 memset(tx_ring->desc, 0, tx_ring->size);
2545
2546 tx_ring->next_to_use = 0;
2547 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002548}
2549
2550/**
2551 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2552 * @adapter: board private structure
2553 **/
2554static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2555{
2556 int i;
2557
2558 for (i = 0; i < adapter->num_tx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002559 igb_clean_tx_ring(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002560}
2561
2562/**
2563 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002564 * @rx_ring: ring to clean the resources from
2565 *
2566 * Free all receive software resources
2567 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002568void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002569{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002570 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002571
2572 vfree(rx_ring->buffer_info);
2573 rx_ring->buffer_info = NULL;
2574
Alexander Duyck439705e2009-10-27 23:49:20 +00002575 /* if not set, then don't free */
2576 if (!rx_ring->desc)
2577 return;
2578
Alexander Duyck80785292009-10-27 15:51:47 +00002579 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2580 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002581
2582 rx_ring->desc = NULL;
2583}
2584
2585/**
2586 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2587 * @adapter: board private structure
2588 *
2589 * Free all receive software resources
2590 **/
2591static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2592{
2593 int i;
2594
2595 for (i = 0; i < adapter->num_rx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002596 igb_free_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002597}
2598
2599/**
2600 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002601 * @rx_ring: ring to free buffers from
2602 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002603static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002604{
2605 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002606 unsigned long size;
2607 unsigned int i;
2608
2609 if (!rx_ring->buffer_info)
2610 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00002611
Auke Kok9d5c8242008-01-24 02:22:38 -08002612 /* Free all the Rx ring sk_buffs */
2613 for (i = 0; i < rx_ring->count; i++) {
2614 buffer_info = &rx_ring->buffer_info[i];
2615 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002616 pci_unmap_single(rx_ring->pdev,
2617 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002618 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002619 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002620 buffer_info->dma = 0;
2621 }
2622
2623 if (buffer_info->skb) {
2624 dev_kfree_skb(buffer_info->skb);
2625 buffer_info->skb = NULL;
2626 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002627 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002628 pci_unmap_page(rx_ring->pdev,
2629 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002630 PAGE_SIZE / 2,
2631 PCI_DMA_FROMDEVICE);
2632 buffer_info->page_dma = 0;
2633 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002634 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002635 put_page(buffer_info->page);
2636 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002637 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002638 }
2639 }
2640
Auke Kok9d5c8242008-01-24 02:22:38 -08002641 size = sizeof(struct igb_buffer) * rx_ring->count;
2642 memset(rx_ring->buffer_info, 0, size);
2643
2644 /* Zero out the descriptor ring */
2645 memset(rx_ring->desc, 0, rx_ring->size);
2646
2647 rx_ring->next_to_clean = 0;
2648 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002649}
2650
2651/**
2652 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2653 * @adapter: board private structure
2654 **/
2655static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2656{
2657 int i;
2658
2659 for (i = 0; i < adapter->num_rx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002660 igb_clean_rx_ring(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002661}
2662
2663/**
2664 * igb_set_mac - Change the Ethernet Address of the NIC
2665 * @netdev: network interface device structure
2666 * @p: pointer to an address structure
2667 *
2668 * Returns 0 on success, negative on failure
2669 **/
2670static int igb_set_mac(struct net_device *netdev, void *p)
2671{
2672 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002673 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002674 struct sockaddr *addr = p;
2675
2676 if (!is_valid_ether_addr(addr->sa_data))
2677 return -EADDRNOTAVAIL;
2678
2679 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002680 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002681
Alexander Duyck26ad9172009-10-05 06:32:49 +00002682 /* set the correct pool for the new PF MAC address in entry 0 */
2683 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2684 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002685
Auke Kok9d5c8242008-01-24 02:22:38 -08002686 return 0;
2687}
2688
2689/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002690 * igb_write_mc_addr_list - write multicast addresses to MTA
2691 * @netdev: network interface device structure
2692 *
2693 * Writes multicast address list to the MTA hash table.
2694 * Returns: -ENOMEM on failure
2695 * 0 on no addresses written
2696 * X on writing X addresses to MTA
2697 **/
2698static int igb_write_mc_addr_list(struct net_device *netdev)
2699{
2700 struct igb_adapter *adapter = netdev_priv(netdev);
2701 struct e1000_hw *hw = &adapter->hw;
2702 struct dev_mc_list *mc_ptr = netdev->mc_list;
2703 u8 *mta_list;
2704 u32 vmolr = 0;
2705 int i;
2706
2707 if (!netdev->mc_count) {
2708 /* nothing to program, so clear mc list */
2709 igb_update_mc_addr_list(hw, NULL, 0);
2710 igb_restore_vf_multicasts(adapter);
2711 return 0;
2712 }
2713
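	/* allocate a packed array, ETH_ALEN (6) bytes per address */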
2714 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2715 if (!mta_list)
2716 return -ENOMEM;
2717
2718 /* set vmolr receive overflow multicast bit */
2719 vmolr |= E1000_VMOLR_ROMPE;
2720
2721 /* The shared function expects a packed array of only addresses. */
2722 mc_ptr = netdev->mc_list;
2723
2724 for (i = 0; i < netdev->mc_count; i++) {
2725 if (!mc_ptr)
2726 break;
2727 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2728 mc_ptr = mc_ptr->next;
2729 }
2730 igb_update_mc_addr_list(hw, mta_list, i);
2731 kfree(mta_list);
2732
2733 return netdev->mc_count;
2734}
2735
2736/**
2737 * igb_write_uc_addr_list - write unicast addresses to RAR table
2738 * @netdev: network interface device structure
2739 *
2740 * Writes unicast address list to the RAR table.
2741 * Returns: -ENOMEM on failure/insufficient address space
2742 * 0 on no addresses written
2743 * X on writing X addresses to the RAR table
2744 **/
2745static int igb_write_uc_addr_list(struct net_device *netdev)
2746{
2747 struct igb_adapter *adapter = netdev_priv(netdev);
2748 struct e1000_hw *hw = &adapter->hw;
2749 unsigned int vfn = adapter->vfs_allocated_count;
2750 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2751 int count = 0;
2752
2753 /* return ENOMEM indicating insufficient memory for addresses */
2754 if (netdev->uc.count > rar_entries)
2755 return -ENOMEM;
2756
2757 if (netdev->uc.count && rar_entries) {
2758 struct netdev_hw_addr *ha;
2759 list_for_each_entry(ha, &netdev->uc.list, list) {
2760 if (!rar_entries)
2761 break;
2762 igb_rar_set_qsel(adapter, ha->addr,
2763 rar_entries--,
2764 vfn);
2765 count++;
2766 }
2767 }
2768 /* write the addresses in reverse order to avoid write combining */
2769 for (; rar_entries > 0 ; rar_entries--) {
2770 wr32(E1000_RAH(rar_entries), 0);
2771 wr32(E1000_RAL(rar_entries), 0);
2772 }
2773 wrfl();
2774
2775 return count;
2776}
2777
2778/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002779 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08002780 * @netdev: network interface device structure
2781 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002782 * The set_rx_mode entry point is called whenever the unicast or multicast
2783 * address lists or the network interface flags are updated. This routine is
2784 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08002785 * promiscuous mode, and all-multi behavior.
2786 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002787static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002788{
2789 struct igb_adapter *adapter = netdev_priv(netdev);
2790 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002791 unsigned int vfn = adapter->vfs_allocated_count;
2792 u32 rctl, vmolr = 0;
2793 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08002794
2795 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08002796 rctl = rd32(E1000_RCTL);
2797
Alexander Duyck68d480c2009-10-05 06:33:08 +00002798	/* clear the affected bits */
2799 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2800
Patrick McHardy746b9f02008-07-16 20:15:45 -07002801 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002802 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002803 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002804 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002805 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002806 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002807 vmolr |= E1000_VMOLR_MPME;
2808 } else {
2809 /*
2810 * Write addresses to the MTA, if the attempt fails
 2811	 * then we should just turn on promiscuous mode so
2812 * that we can at least receive multicast traffic
2813 */
2814 count = igb_write_mc_addr_list(netdev);
2815 if (count < 0) {
2816 rctl |= E1000_RCTL_MPE;
2817 vmolr |= E1000_VMOLR_MPME;
2818 } else if (count) {
2819 vmolr |= E1000_VMOLR_ROMPE;
2820 }
2821 }
2822 /*
2823 * Write addresses to available RAR registers, if there is not
2824 * sufficient space to store all the addresses then enable
 2825	 * unicast promiscuous mode
2826 */
2827 count = igb_write_uc_addr_list(netdev);
2828 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002829 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002830 vmolr |= E1000_VMOLR_ROPE;
2831 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07002832 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07002833 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002834 wr32(E1000_RCTL, rctl);
2835
Alexander Duyck68d480c2009-10-05 06:33:08 +00002836 /*
2837 * In order to support SR-IOV and eventually VMDq it is necessary to set
2838 * the VMOLR to enable the appropriate modes. Without this workaround
2839 * we will have issues with VLAN tag stripping not being done for frames
2840 * that are only arriving because we are the default pool
2841 */
2842 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00002843 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00002844
Alexander Duyck68d480c2009-10-05 06:33:08 +00002845 vmolr |= rd32(E1000_VMOLR(vfn)) &
2846 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2847 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00002848 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002849}
2850
2851/* Need to wait a few seconds after link up to get diagnostic information from
2852 * the phy */
2853static void igb_update_phy_info(unsigned long data)
2854{
2855 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002856 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002857}
2858
2859/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00002860 * igb_has_link - check shared code for link and determine up/down
2861 * @adapter: pointer to driver private info
2862 **/
2863static bool igb_has_link(struct igb_adapter *adapter)
2864{
2865 struct e1000_hw *hw = &adapter->hw;
2866 bool link_active = false;
2867 s32 ret_val = 0;
2868
 2869	/* get_link_status is set on LSC (link status) interrupt or
 2870	 * rx sequence error interrupt. get_link_status will stay
 2871	 * set until e1000_check_for_link establishes link
 2872	 * for copper adapters ONLY
 2873	 */
2874 switch (hw->phy.media_type) {
2875 case e1000_media_type_copper:
2876 if (hw->mac.get_link_status) {
2877 ret_val = hw->mac.ops.check_for_link(hw);
2878 link_active = !hw->mac.get_link_status;
2879 } else {
2880 link_active = true;
2881 }
2882 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00002883 case e1000_media_type_internal_serdes:
2884 ret_val = hw->mac.ops.check_for_link(hw);
2885 link_active = hw->mac.serdes_has_link;
2886 break;
2887 default:
2888 case e1000_media_type_unknown:
2889 break;
2890 }
2891
2892 return link_active;
2893}
2894
2895/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002896 * igb_watchdog - Timer Call-back
2897 * @data: pointer to adapter cast into an unsigned long
2898 **/
2899static void igb_watchdog(unsigned long data)
2900{
2901 struct igb_adapter *adapter = (struct igb_adapter *)data;
2902 /* Do the rest outside of interrupt context */
2903 schedule_work(&adapter->watchdog_task);
2904}
2905
2906static void igb_watchdog_task(struct work_struct *work)
2907{
2908 struct igb_adapter *adapter = container_of(work,
2909 struct igb_adapter, watchdog_task);
2910 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002911 struct net_device *netdev = adapter->netdev;
2912 struct igb_ring *tx_ring = adapter->tx_ring;
Auke Kok9d5c8242008-01-24 02:22:38 -08002913 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07002914 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002915
Alexander Duyck4d6b7252009-02-06 23:16:24 +00002916 link = igb_has_link(adapter);
2917 if ((netif_carrier_ok(netdev)) && link)
Auke Kok9d5c8242008-01-24 02:22:38 -08002918 goto link_up;
2919
Auke Kok9d5c8242008-01-24 02:22:38 -08002920 if (link) {
2921 if (!netif_carrier_ok(netdev)) {
2922 u32 ctrl;
2923 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2924 &adapter->link_speed,
2925 &adapter->link_duplex);
2926
2927 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08002928			/* Link status message must follow this format */
2929 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08002930 "Flow Control: %s\n",
Alexander Duyck527d47c2008-11-27 00:21:39 -08002931 netdev->name,
Auke Kok9d5c8242008-01-24 02:22:38 -08002932 adapter->link_speed,
2933 adapter->link_duplex == FULL_DUPLEX ?
2934 "Full Duplex" : "Half Duplex",
2935 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2936 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2937 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2938 E1000_CTRL_TFCE) ? "TX" : "None")));
2939
2940 /* tweak tx_queue_len according to speed/duplex and
2941 * adjust the timeout factor */
2942 netdev->tx_queue_len = adapter->tx_queue_len;
2943 adapter->tx_timeout_factor = 1;
2944 switch (adapter->link_speed) {
2945 case SPEED_10:
2946 netdev->tx_queue_len = 10;
2947 adapter->tx_timeout_factor = 14;
2948 break;
2949 case SPEED_100:
2950 netdev->tx_queue_len = 100;
2951 /* maybe add some timeout factor ? */
2952 break;
2953 }
2954
2955 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002956
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002957 igb_ping_all_vfs(adapter);
2958
Alexander Duyck4b1a9872009-02-06 23:19:50 +00002959 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08002960 if (!test_bit(__IGB_DOWN, &adapter->state))
2961 mod_timer(&adapter->phy_info_timer,
2962 round_jiffies(jiffies + 2 * HZ));
2963 }
2964 } else {
2965 if (netif_carrier_ok(netdev)) {
2966 adapter->link_speed = 0;
2967 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08002968			/* Link status message must follow this format */
2969 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2970 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08002971 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00002972
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002973 igb_ping_all_vfs(adapter);
2974
Alexander Duyck4b1a9872009-02-06 23:19:50 +00002975 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08002976 if (!test_bit(__IGB_DOWN, &adapter->state))
2977 mod_timer(&adapter->phy_info_timer,
2978 round_jiffies(jiffies + 2 * HZ));
2979 }
2980 }
2981
2982link_up:
2983 igb_update_stats(adapter);
2984
Alexander Duyck4b1a9872009-02-06 23:19:50 +00002985 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Auke Kok9d5c8242008-01-24 02:22:38 -08002986 adapter->tpt_old = adapter->stats.tpt;
Alexander Duyck4b1a9872009-02-06 23:19:50 +00002987 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
Auke Kok9d5c8242008-01-24 02:22:38 -08002988 adapter->colc_old = adapter->stats.colc;
2989
2990 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2991 adapter->gorc_old = adapter->stats.gorc;
2992 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2993 adapter->gotc_old = adapter->stats.gotc;
2994
2995 igb_update_adaptive(&adapter->hw);
2996
2997 if (!netif_carrier_ok(netdev)) {
Alexander Duyckc493ea42009-03-20 00:16:50 +00002998 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002999 /* We've lost link, so the controller stops DMA,
3000 * but we've got queued Tx work that's never going
3001 * to get done, so reset controller to flush Tx.
3002 * (Do the reset outside of interrupt context). */
3003 adapter->tx_timeout_count++;
3004 schedule_work(&adapter->reset_task);
Jesse Brandeburgc2d5ab42009-05-07 11:07:35 +00003005 /* return immediately since reset is imminent */
3006 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003007 }
3008 }
3009
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003010 /* Force detection of hung controller every watchdog period */
3011 for (i = 0; i < adapter->num_tx_queues; i++)
3012 adapter->tx_ring[i].detect_tx_hung = true;
3013
Auke Kok9d5c8242008-01-24 02:22:38 -08003014 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003015 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003016 u32 eics = 0;
3017 for (i = 0; i < adapter->num_q_vectors; i++) {
3018 struct igb_q_vector *q_vector = adapter->q_vector[i];
3019 eics |= q_vector->eims_value;
3020 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003021 wr32(E1000_EICS, eics);
3022 } else {
3023 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3024 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003025
Auke Kok9d5c8242008-01-24 02:22:38 -08003026 /* Reset the timer */
3027 if (!test_bit(__IGB_DOWN, &adapter->state))
3028 mod_timer(&adapter->watchdog_timer,
3029 round_jiffies(jiffies + 2 * HZ));
3030}
3031
3032enum latency_range {
3033 lowest_latency = 0,
3034 low_latency = 1,
3035 bulk_latency = 2,
3036 latency_invalid = 255
3037};
3038
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003039/**
3040 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3041 *
 3042 * Stores a new ITR value based strictly on packet size. This
 3043 * algorithm is less sophisticated than that used in igb_update_itr,
 3044 * due to the difficulty of synchronizing statistics across multiple
 3045 * receive rings. The divisors and thresholds used by this function
3046 * were determined based on theoretical maximum wire speed and testing
3047 * data, in order to minimize response time while increasing bulk
3048 * throughput.
3049 * This functionality is controlled by the InterruptThrottleRate module
3050 * parameter (see igb_param.c)
3051 * NOTE: This function is called only when operating in a multiqueue
3052 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003053 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003054 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003055static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003056{
Alexander Duyck047e0032009-10-27 15:49:27 +00003057 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003058 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003059 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -08003060
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003061 /* For non-gigabit speeds, just fix the interrupt rate at 4000
 3062	 * ints/sec - an ITR value of 976 in 256 ns units (~250 usec).
3063 */
3064 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003065 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003066 goto set_itr_val;
3067 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003068
3069 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3070 struct igb_ring *ring = q_vector->rx_ring;
3071 avg_wire_size = ring->total_bytes / ring->total_packets;
3072 }
3073
3074 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3075 struct igb_ring *ring = q_vector->tx_ring;
3076 avg_wire_size = max_t(u32, avg_wire_size,
3077 (ring->total_bytes /
3078 ring->total_packets));
3079 }
3080
3081 /* if avg_wire_size isn't set no work was done */
3082 if (!avg_wire_size)
3083 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003084
3085 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3086 avg_wire_size += 24;
3087
3088 /* Don't starve jumbo frames */
3089 avg_wire_size = min(avg_wire_size, 3000);
3090
3091 /* Give a little boost to mid-size frames */
3092 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3093 new_val = avg_wire_size / 3;
3094 else
3095 new_val = avg_wire_size / 2;
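	/*
	 * Worked example (illustrative): full-size 1500-byte frames give
	 * avg_wire_size = 1500 + 24 = 1524; that falls outside the 300-1200
	 * "mid-size" window, so new_val = 1524 / 2 = 762, which in 256 ns
	 * EITR units is ~195 usec, i.e. roughly 5100 ints/sec.
	 */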
3096
3097set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003098 if (new_val != q_vector->itr_val) {
3099 q_vector->itr_val = new_val;
3100 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003101 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003102clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003103 if (q_vector->rx_ring) {
3104 q_vector->rx_ring->total_bytes = 0;
3105 q_vector->rx_ring->total_packets = 0;
3106 }
3107 if (q_vector->tx_ring) {
3108 q_vector->tx_ring->total_bytes = 0;
3109 q_vector->tx_ring->total_packets = 0;
3110 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003111}
3112
3113/**
3114 * igb_update_itr - update the dynamic ITR value based on statistics
3115 * Stores a new ITR value based on packets and byte
3116 * counts during the last interrupt. The advantage of per interrupt
3117 * computation is faster updates and more accurate ITR for the current
3118 * traffic pattern. Constants in this function were computed
3119 * based on theoretical maximum wire speed and thresholds were set based
3120 * on testing data as well as attempting to minimize response time
3121 * while increasing bulk throughput.
 3122 * This functionality is controlled by the InterruptThrottleRate module
3123 * parameter (see igb_param.c)
3124 * NOTE: These calculations are only valid when operating in a single-
3125 * queue environment.
3126 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003127 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003128 * @packets: the number of packets during this measurement interval
3129 * @bytes: the number of bytes during this measurement interval
3130 **/
3131static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3132 int packets, int bytes)
3133{
3134 unsigned int retval = itr_setting;
3135
3136 if (packets == 0)
3137 goto update_itr_done;
3138
3139 switch (itr_setting) {
3140 case lowest_latency:
3141 /* handle TSO and jumbo frames */
3142 if (bytes/packets > 8000)
3143 retval = bulk_latency;
3144 else if ((packets < 5) && (bytes > 512))
3145 retval = low_latency;
3146 break;
3147 case low_latency: /* 50 usec aka 20000 ints/s */
3148 if (bytes > 10000) {
3149 /* this if handles the TSO accounting */
3150 if (bytes/packets > 8000) {
3151 retval = bulk_latency;
3152 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3153 retval = bulk_latency;
3154 } else if ((packets > 35)) {
3155 retval = lowest_latency;
3156 }
3157 } else if (bytes/packets > 2000) {
3158 retval = bulk_latency;
3159 } else if (packets <= 2 && bytes < 512) {
3160 retval = lowest_latency;
3161 }
3162 break;
3163 case bulk_latency: /* 250 usec aka 4000 ints/s */
3164 if (bytes > 25000) {
3165 if (packets > 35)
3166 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003167 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003168 retval = low_latency;
3169 }
3170 break;
3171 }
3172
3173update_itr_done:
3174 return retval;
3175}
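/*
 * Example walk through igb_update_itr (illustrative): starting at
 * low_latency with 40 packets totaling 24000 bytes in the interval,
 * bytes > 10000 and bytes/packets = 600, so neither TSO test fires and
 * (packets > 35) promotes the queue to lowest_latency.
 */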
3176
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003177static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003178{
Alexander Duyck047e0032009-10-27 15:49:27 +00003179 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003180 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003181 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003182
3183 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3184 if (adapter->link_speed != SPEED_1000) {
3185 current_itr = 0;
3186 new_itr = 4000;
3187 goto set_itr_now;
3188 }
3189
3190 adapter->rx_itr = igb_update_itr(adapter,
3191 adapter->rx_itr,
3192 adapter->rx_ring->total_packets,
3193 adapter->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003194
Alexander Duyck047e0032009-10-27 15:49:27 +00003195 adapter->tx_itr = igb_update_itr(adapter,
3196 adapter->tx_itr,
3197 adapter->tx_ring->total_packets,
3198 adapter->tx_ring->total_bytes);
3199 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003200
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003201 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003202 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003203 current_itr = low_latency;
3204
Auke Kok9d5c8242008-01-24 02:22:38 -08003205 switch (current_itr) {
3206 /* counts and packets in update_itr are dependent on these numbers */
3207 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003208 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003209 break;
3210 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003211 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003212 break;
3213 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003214 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003215 break;
3216 default:
3217 break;
3218 }
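	/*
	 * The constants above appear to be EITR counts in 256 ns units:
	 * 56 * 256 ns ~= 14.3 usec (~70,000 ints/sec), 196 * 256 ns ~= 50 usec
	 * (~20,000 ints/sec) and 980 * 256 ns ~= 251 usec (~4,000 ints/sec).
	 */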
3219
3220set_itr_now:
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003221 adapter->rx_ring->total_bytes = 0;
3222 adapter->rx_ring->total_packets = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003223 adapter->tx_ring->total_bytes = 0;
3224 adapter->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003225
Alexander Duyck047e0032009-10-27 15:49:27 +00003226 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003227 /* this attempts to bias the interrupt rate towards Bulk
3228 * by adding intermediate steps when interrupt rate is
3229 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003230 new_itr = new_itr > q_vector->itr_val ?
3231 max((new_itr * q_vector->itr_val) /
3232 (new_itr + (q_vector->itr_val >> 2)),
3233 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003234 new_itr;
3235 /* Don't write the value here; it resets the adapter's
3236 * internal timer, and causes us to delay far longer than
3237 * we should between interrupts. Instead, we write the ITR
3238 * value at the beginning of the next interrupt so the timing
3239 * ends up being correct.
3240 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003241 q_vector->itr_val = new_itr;
3242 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003243 }
3244
3245 return;
3246}
3247
Auke Kok9d5c8242008-01-24 02:22:38 -08003248#define IGB_TX_FLAGS_CSUM 0x00000001
3249#define IGB_TX_FLAGS_VLAN 0x00000002
3250#define IGB_TX_FLAGS_TSO 0x00000004
3251#define IGB_TX_FLAGS_IPV4 0x00000008
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003252#define IGB_TX_FLAGS_TSTAMP 0x00000010
Auke Kok9d5c8242008-01-24 02:22:38 -08003253#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3254#define IGB_TX_FLAGS_VLAN_SHIFT 16
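/*
 * tx_flags layout: the low 16 bits carry the IGB_TX_FLAGS_* bits defined
 * above, while the high 16 bits (IGB_TX_FLAGS_VLAN_MASK) carry the 802.1Q
 * tag shifted in at IGB_TX_FLAGS_VLAN_SHIFT.
 */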
3255
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003256static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003257 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3258{
3259 struct e1000_adv_tx_context_desc *context_desc;
3260 unsigned int i;
3261 int err;
3262 struct igb_buffer *buffer_info;
3263 u32 info = 0, tu_cmd = 0;
3264 u32 mss_l4len_idx, l4len;
3265 *hdr_len = 0;
3266
3267 if (skb_header_cloned(skb)) {
3268 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3269 if (err)
3270 return err;
3271 }
3272
3273 l4len = tcp_hdrlen(skb);
3274 *hdr_len += l4len;
3275
3276 if (skb->protocol == htons(ETH_P_IP)) {
3277 struct iphdr *iph = ip_hdr(skb);
3278 iph->tot_len = 0;
3279 iph->check = 0;
3280 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3281 iph->daddr, 0,
3282 IPPROTO_TCP,
3283 0);
3284 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3285 ipv6_hdr(skb)->payload_len = 0;
3286 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3287 &ipv6_hdr(skb)->daddr,
3288 0, IPPROTO_TCP, 0);
3289 }
3290
3291 i = tx_ring->next_to_use;
3292
3293 buffer_info = &tx_ring->buffer_info[i];
3294 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3295 /* VLAN MACLEN IPLEN */
3296 if (tx_flags & IGB_TX_FLAGS_VLAN)
3297 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3298 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3299 *hdr_len += skb_network_offset(skb);
3300 info |= skb_network_header_len(skb);
3301 *hdr_len += skb_network_header_len(skb);
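	/* at this point *hdr_len is the complete header length skipped by
	 * TSO: L2 header (network offset) + L3 header + L4 (TCP) header */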
3302 context_desc->vlan_macip_lens = cpu_to_le32(info);
3303
3304 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3305 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3306
3307 if (skb->protocol == htons(ETH_P_IP))
3308 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3309 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3310
3311 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3312
3313 /* MSS L4LEN IDX */
3314 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3315 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3316
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003317 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003318 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3319 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003320
3321 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3322 context_desc->seqnum_seed = 0;
3323
3324 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003325 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003326 buffer_info->dma = 0;
3327 i++;
3328 if (i == tx_ring->count)
3329 i = 0;
3330
3331 tx_ring->next_to_use = i;
3332
3333 return true;
3334}
3335
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003336static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3337 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003338{
3339 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck80785292009-10-27 15:51:47 +00003340 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003341 struct igb_buffer *buffer_info;
3342 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003343 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003344
3345 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3346 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3347 i = tx_ring->next_to_use;
3348 buffer_info = &tx_ring->buffer_info[i];
3349 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3350
3351 if (tx_flags & IGB_TX_FLAGS_VLAN)
3352 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3353 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3354 if (skb->ip_summed == CHECKSUM_PARTIAL)
3355 info |= skb_network_header_len(skb);
3356
3357 context_desc->vlan_macip_lens = cpu_to_le32(info);
3358
3359 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3360
3361 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003362 __be16 protocol;
3363
3364 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3365 const struct vlan_ethhdr *vhdr =
3366 (const struct vlan_ethhdr*)skb->data;
3367
3368 protocol = vhdr->h_vlan_encapsulated_proto;
3369 } else {
3370 protocol = skb->protocol;
3371 }
3372
3373 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003374 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003375 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003376 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3377 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003378 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3379 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003380 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003381 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003382 /* XXX what about other V6 headers?? */
3383 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3384 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003385 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3386 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003387 break;
3388 default:
3389 if (unlikely(net_ratelimit()))
Alexander Duyck80785292009-10-27 15:51:47 +00003390 dev_warn(&pdev->dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003391 "partial checksum but proto=%x!\n",
3392 skb->protocol);
3393 break;
3394 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003395 }
3396
3397 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3398 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003399 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003400 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003401 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003402
3403 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003404 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003405 buffer_info->dma = 0;
3406
3407 i++;
3408 if (i == tx_ring->count)
3409 i = 0;
3410 tx_ring->next_to_use = i;
3411
3412 return true;
3413 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003414 return false;
3415}
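/*
 * Note: igb_tx_csum_adv() returns true when it queued a context descriptor
 * (checksum offload and/or VLAN); the caller uses that to decide whether to
 * set IGB_TX_FLAGS_CSUM for the data descriptors.
 */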
3416
3417#define IGB_MAX_TXD_PWR 16
3418#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3419
Alexander Duyck80785292009-10-27 15:51:47 +00003420static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003421 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003422{
3423 struct igb_buffer *buffer_info;
Alexander Duyck80785292009-10-27 15:51:47 +00003424 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003425 unsigned int len = skb_headlen(skb);
3426 unsigned int count = 0, i;
3427 unsigned int f;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003428 dma_addr_t *map;
Auke Kok9d5c8242008-01-24 02:22:38 -08003429
3430 i = tx_ring->next_to_use;
3431
Alexander Duyck80785292009-10-27 15:51:47 +00003432 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3433 dev_err(&pdev->dev, "TX DMA map failed\n");
Alexander Duyck65689fe2009-03-20 00:17:43 +00003434 return 0;
3435 }
3436
3437 map = skb_shinfo(skb)->dma_maps;
3438
Auke Kok9d5c8242008-01-24 02:22:38 -08003439 buffer_info = &tx_ring->buffer_info[i];
3440 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3441 buffer_info->length = len;
3442 /* set time_stamp *before* dma to help avoid a possible race */
3443 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003444 buffer_info->next_to_watch = i;
Eric Dumazet042a53a2009-06-05 04:04:16 +00003445 buffer_info->dma = skb_shinfo(skb)->dma_head;
Auke Kok9d5c8242008-01-24 02:22:38 -08003446
3447 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3448 struct skb_frag_struct *frag;
3449
Alexander Duyck65689fe2009-03-20 00:17:43 +00003450 i++;
3451 if (i == tx_ring->count)
3452 i = 0;
3453
Auke Kok9d5c8242008-01-24 02:22:38 -08003454 frag = &skb_shinfo(skb)->frags[f];
3455 len = frag->size;
3456
3457 buffer_info = &tx_ring->buffer_info[i];
3458 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3459 buffer_info->length = len;
3460 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003461 buffer_info->next_to_watch = i;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003462 buffer_info->dma = map[count];
Auke Kok9d5c8242008-01-24 02:22:38 -08003463 count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003464 }
3465
Auke Kok9d5c8242008-01-24 02:22:38 -08003466 tx_ring->buffer_info[i].skb = skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003467 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003468
Eric Dumazet042a53a2009-06-05 04:04:16 +00003469 return count + 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003470}
3471
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003472static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003473 int tx_flags, int count, u32 paylen,
3474 u8 hdr_len)
3475{
3476 union e1000_adv_tx_desc *tx_desc = NULL;
3477 struct igb_buffer *buffer_info;
3478 u32 olinfo_status = 0, cmd_type_len;
3479 unsigned int i;
3480
3481 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3482 E1000_ADVTXD_DCMD_DEXT);
3483
3484 if (tx_flags & IGB_TX_FLAGS_VLAN)
3485 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3486
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003487 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3488 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3489
Auke Kok9d5c8242008-01-24 02:22:38 -08003490 if (tx_flags & IGB_TX_FLAGS_TSO) {
3491 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3492
3493 /* insert tcp checksum */
3494 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3495
3496 /* insert ip checksum */
3497 if (tx_flags & IGB_TX_FLAGS_IPV4)
3498 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3499
3500 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3501 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3502 }
3503
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003504 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3505 (tx_flags & (IGB_TX_FLAGS_CSUM |
3506 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003507 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003508 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003509
3510 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
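	/* PAYLEN in the advanced descriptor is the frame length minus the
	 * offloaded header bytes; for TSO the hardware presumably re-inserts
	 * the hdr_len worth of headers on every segment it emits */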
3511
3512 i = tx_ring->next_to_use;
3513 while (count--) {
3514 buffer_info = &tx_ring->buffer_info[i];
3515 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3516 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3517 tx_desc->read.cmd_type_len =
3518 cpu_to_le32(cmd_type_len | buffer_info->length);
3519 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3520 i++;
3521 if (i == tx_ring->count)
3522 i = 0;
3523 }
3524
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003525 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08003526 /* Force memory writes to complete before letting h/w
3527 * know there are new descriptors to fetch. (Only
3528 * applicable for weak-ordered memory model archs,
3529 * such as IA-64). */
3530 wmb();
3531
3532 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00003533 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08003534 /* we need this if more than one processor can write to our tail
 3535	 * at a time, it synchronizes IO on IA64/Altix systems */
3536 mmiowb();
3537}
3538
Alexander Duycke694e962009-10-27 15:53:06 +00003539static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003540{
Alexander Duycke694e962009-10-27 15:53:06 +00003541 struct net_device *netdev = tx_ring->netdev;
3542
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003543 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003544
Auke Kok9d5c8242008-01-24 02:22:38 -08003545 /* Herbert's original patch had:
3546 * smp_mb__after_netif_stop_queue();
3547 * but since that doesn't exist yet, just open code it. */
3548 smp_mb();
3549
3550 /* We need to check again in a case another CPU has just
3551 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00003552 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003553 return -EBUSY;
3554
3555 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003556 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00003557 tx_ring->tx_stats.restart_queue++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003558 return 0;
3559}
3560
Alexander Duycke694e962009-10-27 15:53:06 +00003561static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003562{
Alexander Duyckc493ea42009-03-20 00:16:50 +00003563 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003564 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00003565 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003566}
3567
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003568netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3569 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003570{
Alexander Duycke694e962009-10-27 15:53:06 +00003571 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003572 unsigned int first;
Auke Kok9d5c8242008-01-24 02:22:38 -08003573 unsigned int tx_flags = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003574 u8 hdr_len = 0;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003575 int count = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003576 int tso = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00003577 union skb_shared_tx *shtx = skb_tx(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08003578
Auke Kok9d5c8242008-01-24 02:22:38 -08003579 /* need: 1 descriptor per page,
3580 * + 2 desc gap to keep tail from touching head,
3581 * + 1 desc for skb->data,
3582 * + 1 desc for context descriptor,
3583 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00003584 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003585 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08003586 return NETDEV_TX_BUSY;
3587 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003588
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003589 if (unlikely(shtx->hardware)) {
3590 shtx->in_progress = 1;
3591 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003592 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003593
3594 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3595 tx_flags |= IGB_TX_FLAGS_VLAN;
3596 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3597 }
3598
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003599 if (skb->protocol == htons(ETH_P_IP))
3600 tx_flags |= IGB_TX_FLAGS_IPV4;
3601
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003602 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003603 if (skb_is_gso(skb)) {
3604 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3605 if (tso < 0) {
3606 dev_kfree_skb_any(skb);
3607 return NETDEV_TX_OK;
3608 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003609 }
3610
3611 if (tso)
3612 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003613 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00003614 (skb->ip_summed == CHECKSUM_PARTIAL))
3615 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003616
Alexander Duyck65689fe2009-03-20 00:17:43 +00003617 /*
3618 * count reflects descriptors mapped, if 0 then mapping error
 3619	 * has occurred and we need to rewind the descriptor queue
3620 */
Alexander Duyck80785292009-10-27 15:51:47 +00003621 count = igb_tx_map_adv(tx_ring, skb, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08003622
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003623 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00003624 dev_kfree_skb_any(skb);
3625 tx_ring->buffer_info[first].time_stamp = 0;
3626 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003627 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003628 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003629
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003630 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3631
3632 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00003633 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003634
Auke Kok9d5c8242008-01-24 02:22:38 -08003635 return NETDEV_TX_OK;
3636}
3637
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003638static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3639 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003640{
3641 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003642 struct igb_ring *tx_ring;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003643 int r_idx = 0;
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003644
3645 if (test_bit(__IGB_DOWN, &adapter->state)) {
3646 dev_kfree_skb_any(skb);
3647 return NETDEV_TX_OK;
3648 }
3649
3650 if (skb->len <= 0) {
3651 dev_kfree_skb_any(skb);
3652 return NETDEV_TX_OK;
3653 }
3654
Alexander Duyck1bfaf072009-02-19 20:39:23 -08003655 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003656 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08003657
3658 /* This goes back to the question of how to logically map a tx queue
3659 * to a flow. Right now, performance is impacted slightly negatively
3660 * if using multiple tx queues. If the stack breaks away from a
3661 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00003662 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003663}
3664
3665/**
3666 * igb_tx_timeout - Respond to a Tx Hang
3667 * @netdev: network interface device structure
3668 **/
3669static void igb_tx_timeout(struct net_device *netdev)
3670{
3671 struct igb_adapter *adapter = netdev_priv(netdev);
3672 struct e1000_hw *hw = &adapter->hw;
3673
3674 /* Do the reset outside of interrupt context */
3675 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003676
Auke Kok9d5c8242008-01-24 02:22:38 -08003677 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00003678 wr32(E1000_EICS,
3679 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08003680}
3681
3682static void igb_reset_task(struct work_struct *work)
3683{
3684 struct igb_adapter *adapter;
3685 adapter = container_of(work, struct igb_adapter, reset_task);
3686
3687 igb_reinit_locked(adapter);
3688}
3689
3690/**
3691 * igb_get_stats - Get System Network Statistics
3692 * @netdev: network interface device structure
3693 *
3694 * Returns the address of the device statistics structure.
3695 * The statistics are actually updated from the timer callback.
3696 **/
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003697static struct net_device_stats *igb_get_stats(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003698{
Auke Kok9d5c8242008-01-24 02:22:38 -08003699 /* only return the current stats */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003700 return &netdev->stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08003701}
3702
3703/**
3704 * igb_change_mtu - Change the Maximum Transfer Unit
3705 * @netdev: network interface device structure
3706 * @new_mtu: new value for maximum frame size
3707 *
3708 * Returns 0 on success, negative on failure
3709 **/
3710static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3711{
3712 struct igb_adapter *adapter = netdev_priv(netdev);
3713 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00003714 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003715
3716 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3717 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3718 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3719 return -EINVAL;
3720 }
3721
Auke Kok9d5c8242008-01-24 02:22:38 -08003722 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3723 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3724 return -EINVAL;
3725 }
3726
3727 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3728 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003729
Auke Kok9d5c8242008-01-24 02:22:38 -08003730 /* igb_down has a dependency on max_frame_size */
3731 adapter->max_frame_size = max_frame;
Auke Kok9d5c8242008-01-24 02:22:38 -08003732 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 3733	 * means we reserve 2 more, which pushes us to allocate from the next
3734 * larger slab size.
3735 * i.e. RXBUFFER_2048 --> size-4096 slab
3736 */
3737
Alexander Duyck7d95b712009-10-27 15:50:08 +00003738 if (max_frame <= IGB_RXBUFFER_1024)
Alexander Duyck4c844852009-10-27 15:52:07 +00003739 rx_buffer_len = IGB_RXBUFFER_1024;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003740 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
Alexander Duyck4c844852009-10-27 15:52:07 +00003741 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003742 else
Alexander Duyck4c844852009-10-27 15:52:07 +00003743 rx_buffer_len = IGB_RXBUFFER_128;
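	/*
	 * For frames larger than a VLAN-sized buffer the 128-byte choice
	 * presumes packet-split receives: headers land in the small buffer
	 * while the rest of the frame is placed in paged buffers.
	 */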
3744
3745 if (netif_running(netdev))
3746 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003747
3748 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3749 netdev->mtu, new_mtu);
3750 netdev->mtu = new_mtu;
3751
Alexander Duyck4c844852009-10-27 15:52:07 +00003752 for (i = 0; i < adapter->num_rx_queues; i++)
3753 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3754
Auke Kok9d5c8242008-01-24 02:22:38 -08003755 if (netif_running(netdev))
3756 igb_up(adapter);
3757 else
3758 igb_reset(adapter);
3759
3760 clear_bit(__IGB_RESETTING, &adapter->state);
3761
3762 return 0;
3763}
3764
3765/**
3766 * igb_update_stats - Update the board statistics counters
3767 * @adapter: board private structure
3768 **/
3769
3770void igb_update_stats(struct igb_adapter *adapter)
3771{
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003772 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003773 struct e1000_hw *hw = &adapter->hw;
3774 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003775 u32 rnbc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003776 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003777 int i;
3778 u64 bytes, packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003779
3780#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3781
3782 /*
3783 * Prevent stats update while adapter is being reset, or if the pci
3784 * connection is down.
3785 */
3786 if (adapter->link_speed == 0)
3787 return;
3788 if (pci_channel_offline(pdev))
3789 return;
3790
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003791 bytes = 0;
3792 packets = 0;
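	/* RQDPC (receive queue drop packet count) is read per ring below;
	 * the 0x0FFF mask keeps the low 12 bits, which appear to be the
	 * valid counter width */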
3793 for (i = 0; i < adapter->num_rx_queues; i++) {
3794 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3795 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3796 netdev->stats.rx_fifo_errors += rqdpc_tmp;
3797 bytes += adapter->rx_ring[i].rx_stats.bytes;
3798 packets += adapter->rx_ring[i].rx_stats.packets;
3799 }
3800
3801 netdev->stats.rx_bytes = bytes;
3802 netdev->stats.rx_packets = packets;
3803
3804 bytes = 0;
3805 packets = 0;
3806 for (i = 0; i < adapter->num_tx_queues; i++) {
3807 bytes += adapter->tx_ring[i].tx_stats.bytes;
3808 packets += adapter->tx_ring[i].tx_stats.packets;
3809 }
3810 netdev->stats.tx_bytes = bytes;
3811 netdev->stats.tx_packets = packets;
3812
3813 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08003814 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3815 adapter->stats.gprc += rd32(E1000_GPRC);
3816 adapter->stats.gorc += rd32(E1000_GORCL);
3817 rd32(E1000_GORCH); /* clear GORCL */
3818 adapter->stats.bprc += rd32(E1000_BPRC);
3819 adapter->stats.mprc += rd32(E1000_MPRC);
3820 adapter->stats.roc += rd32(E1000_ROC);
3821
3822 adapter->stats.prc64 += rd32(E1000_PRC64);
3823 adapter->stats.prc127 += rd32(E1000_PRC127);
3824 adapter->stats.prc255 += rd32(E1000_PRC255);
3825 adapter->stats.prc511 += rd32(E1000_PRC511);
3826 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3827 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3828 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3829 adapter->stats.sec += rd32(E1000_SEC);
3830
3831 adapter->stats.mpc += rd32(E1000_MPC);
3832 adapter->stats.scc += rd32(E1000_SCC);
3833 adapter->stats.ecol += rd32(E1000_ECOL);
3834 adapter->stats.mcc += rd32(E1000_MCC);
3835 adapter->stats.latecol += rd32(E1000_LATECOL);
3836 adapter->stats.dc += rd32(E1000_DC);
3837 adapter->stats.rlec += rd32(E1000_RLEC);
3838 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3839 adapter->stats.xontxc += rd32(E1000_XONTXC);
3840 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3841 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3842 adapter->stats.fcruc += rd32(E1000_FCRUC);
3843 adapter->stats.gptc += rd32(E1000_GPTC);
3844 adapter->stats.gotc += rd32(E1000_GOTCL);
3845 rd32(E1000_GOTCH); /* clear GOTCL */
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003846 rnbc = rd32(E1000_RNBC);
3847 adapter->stats.rnbc += rnbc;
3848 netdev->stats.rx_fifo_errors += rnbc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003849 adapter->stats.ruc += rd32(E1000_RUC);
3850 adapter->stats.rfc += rd32(E1000_RFC);
3851 adapter->stats.rjc += rd32(E1000_RJC);
3852 adapter->stats.tor += rd32(E1000_TORH);
3853 adapter->stats.tot += rd32(E1000_TOTH);
3854 adapter->stats.tpr += rd32(E1000_TPR);
3855
3856 adapter->stats.ptc64 += rd32(E1000_PTC64);
3857 adapter->stats.ptc127 += rd32(E1000_PTC127);
3858 adapter->stats.ptc255 += rd32(E1000_PTC255);
3859 adapter->stats.ptc511 += rd32(E1000_PTC511);
3860 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3861 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3862
3863 adapter->stats.mptc += rd32(E1000_MPTC);
3864 adapter->stats.bptc += rd32(E1000_BPTC);
3865
3866 /* used for adaptive IFS */
3867
3868 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3869 adapter->stats.tpt += hw->mac.tx_packet_delta;
3870 hw->mac.collision_delta = rd32(E1000_COLC);
3871 adapter->stats.colc += hw->mac.collision_delta;
3872
3873 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3874 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3875 adapter->stats.tncrs += rd32(E1000_TNCRS);
3876 adapter->stats.tsctc += rd32(E1000_TSCTC);
3877 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3878
3879 adapter->stats.iac += rd32(E1000_IAC);
3880 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3881 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3882 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3883 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3884 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3885 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3886 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3887 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3888
3889 /* Fill out the OS statistics structure */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003890 netdev->stats.multicast = adapter->stats.mprc;
3891 netdev->stats.collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003892
3893 /* Rx Errors */
3894
3895 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00003896 * our own version based on RUC and ROC */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003897 netdev->stats.rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08003898 adapter->stats.crcerrs + adapter->stats.algnerrc +
3899 adapter->stats.ruc + adapter->stats.roc +
3900 adapter->stats.cexterr;
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003901 netdev->stats.rx_length_errors = adapter->stats.ruc +
Auke Kok9d5c8242008-01-24 02:22:38 -08003902 adapter->stats.roc;
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003903 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3904 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3905 netdev->stats.rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003906
3907 /* Tx Errors */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003908 netdev->stats.tx_errors = adapter->stats.ecol +
Auke Kok9d5c8242008-01-24 02:22:38 -08003909 adapter->stats.latecol;
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003910 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3911 netdev->stats.tx_window_errors = adapter->stats.latecol;
3912 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08003913
3914 /* Tx Dropped needs to be maintained elsewhere */
3915
3916 /* Phy Stats */
3917 if (hw->phy.media_type == e1000_media_type_copper) {
3918 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003919 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003920 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3921 adapter->phy_stats.idle_errors += phy_tmp;
3922 }
3923 }
3924
3925 /* Management Stats */
3926 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3927 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3928 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3929}
3930
Auke Kok9d5c8242008-01-24 02:22:38 -08003931static irqreturn_t igb_msix_other(int irq, void *data)
3932{
Alexander Duyck047e0032009-10-27 15:49:27 +00003933 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08003934 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003935 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003936 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00003937
Alexander Duyck047e0032009-10-27 15:49:27 +00003938 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00003939 /* HW is reporting DMA is out of sync */
3940 adapter->stats.doosync++;
3941 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00003942
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003943 /* Check for a mailbox event */
3944 if (icr & E1000_ICR_VMMB)
3945 igb_msg_task(adapter);
3946
3947 if (icr & E1000_ICR_LSC) {
3948 hw->mac.get_link_status = 1;
3949 /* guard against interrupt when we're going down */
3950 if (!test_bit(__IGB_DOWN, &adapter->state))
3951 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3952 }
3953
3954 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003955 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08003956
3957 return IRQ_HANDLED;
3958}
3959
Alexander Duyck047e0032009-10-27 15:49:27 +00003960static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003961{
Alexander Duyck047e0032009-10-27 15:49:27 +00003962 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08003963
Alexander Duyck047e0032009-10-27 15:49:27 +00003964 if (!q_vector->set_itr)
3965 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003966
Alexander Duyck047e0032009-10-27 15:49:27 +00003967 if (!itr_val)
3968 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003969
Alexander Duyck047e0032009-10-27 15:49:27 +00003970 if (q_vector->itr_shift)
3971 itr_val |= itr_val << q_vector->itr_shift;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003972 else
Alexander Duyck047e0032009-10-27 15:49:27 +00003973 itr_val |= 0x8000000;
3974
3975 writel(itr_val, q_vector->itr_register);
3976 q_vector->set_itr = 0;
3977}
3978
3979static irqreturn_t igb_msix_ring(int irq, void *data)
3980{
3981 struct igb_q_vector *q_vector = data;
3982
3983 /* Write the ITR value calculated from the previous interrupt. */
3984 igb_write_itr(q_vector);
3985
3986 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003987
Auke Kok9d5c8242008-01-24 02:22:38 -08003988 return IRQ_HANDLED;
3989}
3990
Jeff Kirsher421e02f2008-10-17 11:08:31 -07003991#ifdef CONFIG_IGB_DCA
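/*
 * Direct Cache Access: tag this vector's descriptor traffic with the CPU
 * currently running the handler so the chipset can push descriptor (and,
 * for Rx, header/payload) cache lines toward that CPU ahead of use.
 */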
Alexander Duyck047e0032009-10-27 15:49:27 +00003992static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003993{
Alexander Duyck047e0032009-10-27 15:49:27 +00003994 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003995 struct e1000_hw *hw = &adapter->hw;
3996 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003997
Alexander Duyck047e0032009-10-27 15:49:27 +00003998 if (q_vector->cpu == cpu)
3999 goto out_no_update;
4000
4001 if (q_vector->tx_ring) {
4002 int q = q_vector->tx_ring->reg_idx;
4003 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4004 if (hw->mac.type == e1000_82575) {
4005 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4006 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4007 } else {
4008 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4009 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4010 E1000_DCA_TXCTRL_CPUID_SHIFT;
4011 }
4012 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4013 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4014 }
4015 if (q_vector->rx_ring) {
4016 int q = q_vector->rx_ring->reg_idx;
4017 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4018 if (hw->mac.type == e1000_82575) {
4019 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4020 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4021 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004022 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004023 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004024 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004025 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004026 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4027 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4028 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4029 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004030 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004031 q_vector->cpu = cpu;
4032out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004033 put_cpu();
4034}
4035
4036static void igb_setup_dca(struct igb_adapter *adapter)
4037{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004038 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004039 int i;
4040
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004041 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004042 return;
4043
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004044 /* Always use CB2 mode, difference is masked in the CB driver. */
4045 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4046
Alexander Duyck047e0032009-10-27 15:49:27 +00004047 for (i = 0; i < adapter->num_q_vectors; i++) {
4048 struct igb_q_vector *q_vector = adapter->q_vector[i];
4049 q_vector->cpu = -1;
4050 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004051 }
4052}
4053
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
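
/*
 * How the callback above gets invoked (a sketch, assuming the usual DCA
 * notifier wiring; the structure name here is hypothetical): the DCA core
 * calls igb_notify_dca() through a registered notifier_block whenever a
 * DCA provider appears or disappears.
 */
static struct notifier_block igb_dca_notifier_sketch = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0,
};

/* registered/unregistered at module init/exit time, roughly:
 *	dca_register_notify(&igb_dca_notifier_sketch);
 *	dca_unregister_notify(&igb_dca_notifier_sketch);
 */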
#endif /* CONFIG_IGB_DCA */

static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	/* clear any previous promiscuous state, then rebuild it below */
	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
	                    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

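/*
 * Example of the message this handler parses (a sketch of the VF side;
 * the mailbox-write helper named here is hypothetical and would live in
 * the VF driver, not in this file): requesting multicast promiscuous
 * mode is a single word combining the command and an info flag,
 *
 *	u32 msgbuf[1];
 *
 *	msgbuf[0] = E1000_VF_SET_PROMISC | E1000_VF_SET_PROMISC_MULTICAST;
 *	vf_mbx_write(hw, msgbuf, 1);		(hypothetical VF-side call)
 *
 * and any request bit left over in E1000_VT_MSGINFO_MASK is rejected
 * above with -EINVAL.
 */
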
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN id before clearing the entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}

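/*
 * VLVF entry layout as implied by the masks used above (reference
 * sketch, not an extra register definition): each of the
 * E1000_VLVF_ARRAY_SIZE entries packs the VLAN id in its low bits
 * (E1000_VLVF_VLANID_MASK), a valid bit (E1000_VLVF_VLANID_ENABLE), and
 * one pool-membership bit per VF/PF pool starting at
 * E1000_VLVF_POOLSEL_SHIFT, so adding VF n to an existing entry is just
 * "reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + n)".
 */
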
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear all flags */
	adapter->vf_data[vf].flags = 0;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval)
		dev_err(&pdev->dev, "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		msgbuf[0] = E1000_VT_MSGTYPE_NACK;
		if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
			igb_write_mbx(hw, msgbuf, 1, vf);
			vf_data->last_nack = jiffies;
		}
		return;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

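/*
 * For context (a sketch of how the handler above is hooked up; this
 * assumes the MSI request path elsewhere in the driver follows the
 * standard pattern): with MSI enabled but MSI-X unavailable, the single
 * vector is requested roughly as
 *
 *	err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
 *			  netdev->name, adapter);
 *
 * which is why "data" is the adapter itself and only q_vector[0] is used.
 */
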
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

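/*
 * NAPI contract, as exercised by igb_poll() above: returning a value
 * smaller than @budget tells the NAPI core the ring is drained, so the
 * poll calls napi_complete() and re-arms the vector's interrupt through
 * igb_ring_irq_enable(); returning exactly @budget keeps the vector in
 * polling mode for another pass.
 */
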
/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	return (count < tx_ring->count);
}

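/*
 * Worked example for the byte accounting in igb_clean_tx_irq() above
 * (numbers are illustrative): a TSO skb with gso_segs = 3,
 * skb_headlen() = 66 and skb->len = 4410 is counted as 3 packets and
 * 4410 + (3 - 1) * 66 = 4542 bytes, since the hardware replicates the
 * header for every segment after the first.
 */
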
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}

static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
                                   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	            E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}

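/*
 * Worked example (values are illustrative): with packet split active, a
 * 1514-byte frame whose parsed header is 66 bytes is reported with 66 in
 * the HDRBUFLEN field, so igb_get_hlen() returns 66 (clamped to
 * rx_buffer_len), and the remaining payload is attached from the
 * half-page buffer in igb_clean_rx_irq_adv() below.
 */
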
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 rx_ring->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;

			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: ring whose receive buffers are to be replenished
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(rx_ring->pdev,
			                                  skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

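/*
 * Note on the descriptor programming above (a summary, no new behavior):
 * in packet-split mode the advanced RX descriptor carries two addresses,
 * hdr_addr pointing at the small skb buffer for the parsed header and
 * pkt_addr at a half page for the payload; in single-buffer mode
 * pkt_addr is the skb data buffer and hdr_addr is written as zero.
 */
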
/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to interface request structure
 * @cmd: ioctl command, one of SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: pointer to interface request structure
 * @cmd: ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	/* the 82575 does not support hardware time stamping */
	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

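/*
 * Example (user-space sketch, not driver code; sock_fd is any open
 * socket): an application reaches the handler above through the
 * SIOCSHWTSTAMP ioctl roughly as follows, error handling omitted.
 *
 *	struct hwtstamp_config cfg = {
 *		.flags     = 0,
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter holds what was actually programmed, since the
 * handler may widen the request (e.g. to HWTSTAMP_FILTER_ALL).
 */
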
5332/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005333 * igb_ioctl -
5334 * @netdev:
5335 * @ifreq:
5336 * @cmd:
5337 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
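
/*
 * Illustrative only: the reg argument above is an offset within the PCI
 * Express capability structure, found via pci_find_capability(). A
 * hypothetical caller reading the negotiated link status might look like
 * the following, assuming PCIE_LINK_STATUS is defined as that register's
 * offset within the capability:
 *
 *	u16 link;
 *
 *	if (!igb_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, &link)) {
 *		(decode link width/speed fields from link here)
 *	}
 */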

static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
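
/*
 * A sketch of the table indexing behind igb_vfta_set(), used by the VLAN
 * callbacks above (the helper itself lives elsewhere in this driver; the
 * register count is an assumption based on the 4096 possible VLAN IDs):
 * the VLAN Filter Table Array is a bank of 32-bit registers, so a VLAN ID
 * selects one register and one bit within it:
 *
 *	u32 index = (vid >> 5) & 0x7F;	(one of 128 registers)
 *	u32 mask = 1 << (vid & 0x1F);	(one of 32 bits)
 *
 * e.g. vid 100 maps to register 3, bit 4.
 */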

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
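
/*
 * Illustrative only: spddplx packs speed and duplex into a single value,
 * so a caller (the ethtool path, for example) can combine the standard
 * SPEED_* and DUPLEX_* constants directly. A hypothetical forced
 * 100 Mb/s full-duplex request would look like:
 *
 *	err = igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);
 *	if (err)
 *		return err;
 *
 * Since SPEED_100 is 100 and DUPLEX_FULL is 1, the sum (101) is
 * unambiguous for the speed/duplex pairs this hardware supports.
 */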

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
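
/*
 * For context, a sketch of how the three AER callbacks above are
 * typically wired together (this driver's actual table lives near its
 * pci_driver definition elsewhere in the file; the field names are those
 * of the PCI core's struct pci_error_handlers):
 *
 *	static struct pci_error_handlers igb_err_handler = {
 *		.error_detected = igb_io_error_detected,
 *		.slot_reset = igb_io_slot_reset,
 *		.resume = igb_io_resume,
 *	};
 */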

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
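
/*
 * Worked example of the packing above (illustrative values): for the MAC
 * address 00:1b:21:3a:4c:5d, addr[0..5] = {0x00, 0x1b, 0x21, 0x3a, 0x4c,
 * 0x5d}, so
 *
 *	rar_low  = 0x3a211b00;	(addr[3] lands in the top byte)
 *	rar_high = 0x00005d4c;	(before E1000_RAH_AV and pool bits)
 *
 * i.e. each register holds the address bytes in little-endian order, as
 * the comment in igb_rar_set_qsel() describes.
 */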

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */