/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_tctl(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
					   struct igb_ring *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROMPE |	 /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |	 /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check whether the VF has VLANs enabled and
	 * increase the size to allow for VLAN tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *	TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *		IGB_TSYNC_SCALE
 *	TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)

/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
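
/* Worked example of the constraint above (derived from the constants,
 * not from the datasheet): TIMINCA = 16 * (1 << 19) = 8,388,608, which
 * fits in 24 bits (max 16,777,215). A +100 ppm rate correction adds
 * 8,388,608 * 100 / 1e9 ~= 0.84, i.e. slightly less than one LSB per tick.
 */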

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

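	/* Read the low word first: on this hardware family reading SYSTIML
	 * is expected to latch SYSTIMH, so the two 32-bit reads below form
	 * one coherent 64-bit sample (hardware behavior assumed here, not
	 * verified at runtime).
	 */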
	stamp = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_desc_unused - calculate how many descriptors are unused
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
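
/* Worked example of the wrap-around arithmetic above: with count = 256,
 * next_to_clean = 10 and next_to_use = 250, the free span wraps past the
 * end of the ring, giving 256 + 10 - 250 - 1 = 15 unused descriptors;
 * one slot is always held back so next_to_use never catches
 * next_to_clean.
 */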

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);
	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
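/* Q_IDX_82576 interleaves queue indices: for i = 0, 1, 2, 3 it yields
 * 0, 8, 1, 9, matching the 82576 layout described below where VF 0 owns
 * queues 0 and 8, VF 1 owns 1 and 9, and so on (values follow directly
 * from the macro).
 */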
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
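
/* For instance, with two VFs reserved (vfs_allocated_count == 2) on an
 * 82576, PF ring 0 lands on hardware queue 2 + Q_IDX_82576(0) = 2 and
 * ring 1 on 2 + Q_IDX_82576(1) = 10, steering clear of the VF-owned
 * queues (illustrative numbers).
 */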

static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
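		/* Sketch of the IVAR0 layout as the code below uses it: each
		 * 32-bit IVAR0[index] entry packs four 8-bit fields,
		 *   byte 0: rx queue 'index'      (queues 0-7)
		 *   byte 1: tx queue 'index'      (queues 0-7)
		 *   byte 2: rx queue 'index' + 8  (queues 8-15)
		 *   byte 3: tx queue 'index' + 8  (queues 8-15)
		 * which is what the byte masks and shifts select.
		 */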
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate, add 1 for every tx queue */
	numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
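	/* e.g. on a hypothetical 4-CPU system with 4 rx and 4 tx queues this
	 * requests 4 + 4 + 1 = 9 MSI-X vectors: one per ring plus one for
	 * link and other causes.
	 */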
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
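
/* Illustrative mapping (hypothetical counts): with 4 rx and 4 tx queues
 * and 8 q_vectors, every ring gets a private vector; with only 4
 * q_vectors, the else branch above pairs tx ring i and rx ring i on the
 * same vector i.
 */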

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
898/**
Auke Kok9d5c8242008-01-24 02:22:38 -0800899 * igb_request_irq - initialize interrupts
900 *
901 * Attempts to configure interrupts using the best available
902 * capabilities of the hardware and kernel.
903 **/
904static int igb_request_irq(struct igb_adapter *adapter)
905{
906 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +0000907 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -0800908 struct e1000_hw *hw = &adapter->hw;
909 int err = 0;
910
911 if (adapter->msix_entries) {
912 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -0700913 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -0800914 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -0800915 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +0000916 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -0800917 if (!pci_enable_msi(adapter->pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -0700918 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -0800919 igb_free_all_tx_resources(adapter);
920 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +0000921 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -0800922 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +0000923 adapter->num_q_vectors = 1;
924 err = igb_alloc_q_vectors(adapter);
925 if (err) {
926 dev_err(&pdev->dev,
927 "Unable to allocate memory for vectors\n");
928 goto request_done;
929 }
930 err = igb_alloc_queues(adapter);
931 if (err) {
932 dev_err(&pdev->dev,
933 "Unable to allocate memory for queues\n");
934 igb_free_q_vectors(adapter);
935 goto request_done;
936 }
937 igb_setup_all_tx_resources(adapter);
938 igb_setup_all_rx_resources(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -0700939 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -0700940 switch (hw->mac.type) {
941 case e1000_82575:
942 wr32(E1000_MSIXBM(0),
Alexander Duyck047e0032009-10-27 15:49:27 +0000943 (E1000_EICR_RX_QUEUE0 |
944 E1000_EICR_TX_QUEUE0 |
945 E1000_EIMS_OTHER));
Alexander Duyck2d064c02008-07-08 15:10:12 -0700946 break;
947 case e1000_82576:
948 wr32(E1000_IVAR0, E1000_IVAR_VALID);
949 break;
950 default:
951 break;
952 }
Auke Kok9d5c8242008-01-24 02:22:38 -0800953 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -0700954
Alexander Duyck7dfc16f2008-07-08 15:10:46 -0700955 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Auke Kok9d5c8242008-01-24 02:22:38 -0800956 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +0000957 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -0800958 if (!err)
959 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +0000960
Auke Kok9d5c8242008-01-24 02:22:38 -0800961 /* fall back to legacy interrupts */
962 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -0700963 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -0800964 }
965
966 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +0000967 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -0800968
Andy Gospodarek6cb5e572008-02-15 14:05:25 -0800969 if (err)
Auke Kok9d5c8242008-01-24 02:22:38 -0800970 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
971 err);
Auke Kok9d5c8242008-01-24 02:22:38 -0800972
973request_done:
974 return err;
975}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
				 q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}


/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset */
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware, DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect, CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
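		/* e.g. for an assumed 9018-byte jumbo frame:
		 * min_tx_space = (9018 + 16 - 4) * 2 = 18060, aligned up to
		 * 18432 and converted to 18 KB; min_rx_space = 9018, aligned
		 * up to 9216 and converted to 9 KB (16 here being
		 * sizeof(union e1000_adv_tx_desc)).
		 */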

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
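	/* Worked example with assumed numbers: pba = 40 KB and a 1522-byte
	 * max frame give hwm = min(40960 * 9 / 10, 40960 - 2 * 1522)
	 * = min(36864, 37916) = 36864; masking to 16-byte granularity
	 * leaves 36864 (0x9000) unchanged.
	 */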
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
1381
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001382/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001383 * igb_probe - Device Initialization Routine
1384 * @pdev: PCI device information struct
1385 * @ent: entry in igb_pci_tbl
1386 *
1387 * Returns 0 on success, negative on failure
1388 *
1389 * igb_probe initializes an adapter identified by a pci_dev structure.
1390 * The OS initialization, configuring of the adapter private structure,
1391 * and a hardware reset occur.
1392 **/
1393static int __devinit igb_probe(struct pci_dev *pdev,
1394 const struct pci_device_id *ent)
1395{
1396 struct net_device *netdev;
1397 struct igb_adapter *adapter;
1398 struct e1000_hw *hw;
1399 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1400 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001401 int err, pci_using_dac;
Alexander Duyck682337f2009-03-14 22:26:40 -07001402 u16 eeprom_data = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08001403 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1404 u32 part_num;
1405
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001406 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001407 if (err)
1408 return err;
1409
1410 pci_using_dac = 0;
Yang Hongyang6a355282009-04-06 19:01:13 -07001411 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001412 if (!err) {
Yang Hongyang6a355282009-04-06 19:01:13 -07001413 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001414 if (!err)
1415 pci_using_dac = 1;
1416 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07001417 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001418 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07001419 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001420 if (err) {
1421 dev_err(&pdev->dev, "No usable DMA "
1422 "configuration, aborting\n");
1423 goto err_dma;
1424 }
1425 }
1426 }
1427
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001428 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1429 IORESOURCE_MEM),
1430 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001431 if (err)
1432 goto err_pci_reg;
1433
Frans Pop19d5afd2009-10-02 10:04:12 -07001434 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001435
Auke Kok9d5c8242008-01-24 02:22:38 -08001436 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001437 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001438
1439 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001440 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1441 IGB_ABS_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001442 if (!netdev)
1443 goto err_alloc_etherdev;
1444
1445 SET_NETDEV_DEV(netdev, &pdev->dev);
1446
1447 pci_set_drvdata(pdev, netdev);
1448 adapter = netdev_priv(netdev);
1449 adapter->netdev = netdev;
1450 adapter->pdev = pdev;
1451 hw = &adapter->hw;
1452 hw->back = adapter;
1453 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1454
1455 mmio_start = pci_resource_start(pdev, 0);
1456 mmio_len = pci_resource_len(pdev, 0);
1457
1458 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001459 hw->hw_addr = ioremap(mmio_start, mmio_len);
1460 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001461 goto err_ioremap;
1462
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001463 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001464 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001465 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001466
1467 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1468
1469 netdev->mem_start = mmio_start;
1470 netdev->mem_end = mmio_start + mmio_len;
1471
Auke Kok9d5c8242008-01-24 02:22:38 -08001472 /* PCI config space info */
1473 hw->vendor_id = pdev->vendor;
1474 hw->device_id = pdev->device;
1475 hw->revision_id = pdev->revision;
1476 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1477 hw->subsystem_device_id = pdev->subsystem_device;
1478
1479 /* setup the private structure */
1480 hw->back = adapter;
1481 /* Copy the default MAC, PHY and NVM function pointers */
1482 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1483 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1484 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1485 /* Initialize skew-specific constants */
1486 err = ei->get_invariants(hw);
1487 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001488 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001489
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001490#ifdef CONFIG_PCI_IOV
1491 /* since iov functionality isn't critical to base device function we
1492 * can accept failure. If it fails we don't allow iov to be enabled */
1493 if (hw->mac.type == e1000_82576) {
1494 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1495 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1496 int i;
1497 unsigned char mac_addr[ETH_ALEN];
1498
Alexander Duyck9ca046d2009-04-09 22:49:39 +00001499 if (num_vfs) {
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001500 adapter->vf_data = kcalloc(num_vfs,
1501 sizeof(struct vf_data_storage),
1502 GFP_KERNEL);
Alexander Duyck9ca046d2009-04-09 22:49:39 +00001503 if (!adapter->vf_data) {
1504 dev_err(&pdev->dev,
1505 "Could not allocate VF private data - "
1506 "IOV enable failed\n");
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001507 } else {
Alexander Duyck9ca046d2009-04-09 22:49:39 +00001508 err = pci_enable_sriov(pdev, num_vfs);
1509 if (!err) {
1510 adapter->vfs_allocated_count = num_vfs;
1511 dev_info(&pdev->dev,
1512 "%d vfs allocated\n",
1513 num_vfs);
1514 for (i = 0;
1515 i < adapter->vfs_allocated_count;
1516 i++) {
1517 random_ether_addr(mac_addr);
1518 igb_set_vf_mac(adapter, i,
1519 mac_addr);
1520 }
1521 } else {
1522 kfree(adapter->vf_data);
1523 adapter->vf_data = NULL;
1524 }
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001525 }
1526 }
1527 }
1528
1529#endif
Alexander Duyck450c87c2009-02-06 23:22:11 +00001530 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001531 err = igb_sw_init(adapter);
1532 if (err)
1533 goto err_sw_init;
1534
1535 igb_get_bus_info_pcie(hw);
1536
1537 hw->phy.autoneg_wait_to_complete = false;
1538 hw->mac.adaptive_ifs = true;
1539
1540 /* Copper options */
1541 if (hw->phy.media_type == e1000_media_type_copper) {
1542 hw->phy.mdix = AUTO_ALL_MODES;
1543 hw->phy.disable_polarity_correction = false;
1544 hw->phy.ms_type = e1000_ms_hw_default;
1545 }
1546
1547 if (igb_check_reset_block(hw))
1548 dev_info(&pdev->dev,
1549 "PHY reset is blocked due to SOL/IDER session.\n");
1550
1551 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001552 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001553 NETIF_F_HW_VLAN_TX |
1554 NETIF_F_HW_VLAN_RX |
1555 NETIF_F_HW_VLAN_FILTER;
1556
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001557 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001558 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001559 netdev->features |= NETIF_F_TSO6;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001560
Herbert Xu5c0999b2009-01-19 15:20:57 -08001561 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001562
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001563 netdev->vlan_features |= NETIF_F_TSO;
1564 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001565 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001566 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001567 netdev->vlan_features |= NETIF_F_SG;
1568
Auke Kok9d5c8242008-01-24 02:22:38 -08001569 if (pci_using_dac)
1570 netdev->features |= NETIF_F_HIGHDMA;
1571
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001572 if (adapter->hw.mac.type == e1000_82576)
1573 netdev->features |= NETIF_F_SCTP_CSUM;
1574
Auke Kok9d5c8242008-01-24 02:22:38 -08001575 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1576
1577 /* before reading the NVM, reset the controller to put the device in a
1578 * known good starting state */
1579 hw->mac.ops.reset_hw(hw);
1580
1581 /* make sure the NVM is good */
1582 if (igb_validate_nvm_checksum(hw) < 0) {
1583 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1584 err = -EIO;
1585 goto err_eeprom;
1586 }
1587
1588 /* copy the MAC address out of the NVM */
1589 if (hw->mac.ops.read_mac_addr(hw))
1590 dev_err(&pdev->dev, "NVM Read Error\n");
1591
1592 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1593 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1594
1595 if (!is_valid_ether_addr(netdev->perm_addr)) {
1596 dev_err(&pdev->dev, "Invalid MAC Address\n");
1597 err = -EIO;
1598 goto err_eeprom;
1599 }
1600
Alexander Duyck0e340482009-03-20 00:17:08 +00001601 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1602 (unsigned long) adapter);
1603 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1604 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001605
1606 INIT_WORK(&adapter->reset_task, igb_reset_task);
1607 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1608
Alexander Duyck450c87c2009-02-06 23:22:11 +00001609 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001610 adapter->fc_autoneg = true;
1611 hw->mac.autoneg = true;
1612 hw->phy.autoneg_advertised = 0x2f;
1613
Alexander Duyck0cce1192009-07-23 18:10:24 +00001614 hw->fc.requested_mode = e1000_fc_default;
1615 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001616
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001617 adapter->itr_setting = IGB_DEFAULT_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08001618 adapter->itr = IGB_START_ITR;
1619
1620 igb_validate_mdi_setting(hw);
1621
Auke Kok9d5c8242008-01-24 02:22:38 -08001622	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
1623 * enable the ACPI Magic Packet filter
1624 */
1625
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001626 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001627 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001628 else if (hw->bus.func == 1)
1629 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001630
1631 if (eeprom_data & eeprom_apme_mask)
1632 adapter->eeprom_wol |= E1000_WUFC_MAG;
1633
1634 /* now that we have the eeprom settings, apply the special cases where
1635 * the eeprom may be wrong or the board simply won't support wake on
1636 * lan on a particular port */
1637 switch (pdev->device) {
1638 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1639 adapter->eeprom_wol = 0;
1640 break;
1641 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001642 case E1000_DEV_ID_82576_FIBER:
1643 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001644 /* Wake events only supported on port A for dual fiber
1645 * regardless of eeprom setting */
1646 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1647 adapter->eeprom_wol = 0;
1648 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001649 case E1000_DEV_ID_82576_QUAD_COPPER:
1650 /* if quad port adapter, disable WoL on all but port A */
1651 if (global_quad_port_a != 0)
1652 adapter->eeprom_wol = 0;
1653 else
1654 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1655 /* Reset for multiple quad port adapters */
1656 if (++global_quad_port_a == 4)
1657 global_quad_port_a = 0;
1658 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001659 }
1660
1661 /* initialize the wol settings based on the eeprom settings */
1662 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001663 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001664
1665 /* reset the hardware with the new settings */
1666 igb_reset(adapter);
1667
1668 /* let the f/w know that the h/w is now under the control of the
1669 * driver. */
1670 igb_get_hw_control(adapter);
1671
Auke Kok9d5c8242008-01-24 02:22:38 -08001672 strcpy(netdev->name, "eth%d");
1673 err = register_netdev(netdev);
1674 if (err)
1675 goto err_register;
1676
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001677 /* carrier off reporting is important to ethtool even BEFORE open */
1678 netif_carrier_off(netdev);
1679
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001680#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001681 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001682 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001683 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001684 igb_setup_dca(adapter);
1685 }
1686#endif
1687
Patrick Ohly38c845c2009-02-12 05:03:41 +00001688 /*
1689 * Initialize hardware timer: we keep it running just in case
1690 * that some program needs it later on.
1691 */
1692 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1693 adapter->cycles.read = igb_read_clock;
1694 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1695 adapter->cycles.mult = 1;
1696 adapter->cycles.shift = IGB_TSYNC_SHIFT;
1697 wr32(E1000_TIMINCA,
1698 (1<<24) |
1699 IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
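	/* For reference (a reading of the generic cyclecounter API, not
	 * igb-specific documentation): the timecounter layer converts a
	 * raw cycle delta to nanoseconds as
	 *   ns = (delta * cycles.mult) >> cycles.shift
	 * so with mult == 1, SYSTIM is treated as a fixed-point nanosecond
	 * count with IGB_TSYNC_SHIFT fractional bits, which is what the
	 * TIMINCA programming above arranges. */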
1700#if 0
1701 /*
1702 * Avoid rollover while we initialize by resetting the time counter.
1703 */
1704 wr32(E1000_SYSTIML, 0x00000000);
1705 wr32(E1000_SYSTIMH, 0x00000000);
1706#else
1707 /*
1708 * Set registers so that rollover occurs soon to test this.
1709 */
1710 wr32(E1000_SYSTIML, 0x00000000);
1711 wr32(E1000_SYSTIMH, 0xFF800000);
1712#endif
1713 wrfl();
1714 timecounter_init(&adapter->clock,
1715 &adapter->cycles,
1716 ktime_to_ns(ktime_get_real()));
1717
Patrick Ohly33af6bc2009-02-12 05:03:43 +00001718 /*
 1719	 * Synchronize our NIC clock against the system wall clock. NIC
 1720	 * time stamp reading requires ~3us per sample, and the samples
 1721	 * proved stable even under load, so 10 samples per offset
 1722	 * comparison are sufficient.
1723 */
1724 memset(&adapter->compare, 0, sizeof(adapter->compare));
1725 adapter->compare.source = &adapter->clock;
1726 adapter->compare.target = ktime_get_real;
1727 adapter->compare.num_samples = 10;
1728 timecompare_update(&adapter->compare, 0);
1729
Patrick Ohly38c845c2009-02-12 05:03:41 +00001730#ifdef DEBUG
1731 {
1732 char buffer[160];
1733 printk(KERN_DEBUG
1734 "igb: %s: hw %p initialized timer\n",
1735 igb_get_time_str(adapter, buffer),
1736 &adapter->hw);
1737 }
1738#endif
1739
Auke Kok9d5c8242008-01-24 02:22:38 -08001740 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1741 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001742 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001743 netdev->name,
1744 ((hw->bus.speed == e1000_bus_speed_2500)
1745 ? "2.5Gb/s" : "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001746 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1747 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1748 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1749 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001750 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001751
1752 igb_read_part_num(hw, &part_num);
1753 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1754 (part_num >> 8), (part_num & 0xff));
1755
1756 dev_info(&pdev->dev,
1757 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1758 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001759 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001760 adapter->num_rx_queues, adapter->num_tx_queues);
1761
Auke Kok9d5c8242008-01-24 02:22:38 -08001762 return 0;
1763
1764err_register:
1765 igb_release_hw_control(adapter);
1766err_eeprom:
1767 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001768 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001769
1770 if (hw->flash_address)
1771 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001772err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001773 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001774 iounmap(hw->hw_addr);
1775err_ioremap:
1776 free_netdev(netdev);
1777err_alloc_etherdev:
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001778 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1779 IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001780err_pci_reg:
1781err_dma:
1782 pci_disable_device(pdev);
1783 return err;
1784}
1785
1786/**
1787 * igb_remove - Device Removal Routine
1788 * @pdev: PCI device information struct
1789 *
1790 * igb_remove is called by the PCI subsystem to alert the driver
 1791	 * that it should release a PCI device. This could be caused by a
1792 * Hot-Plug event, or because the driver is going to be removed from
1793 * memory.
1794 **/
1795static void __devexit igb_remove(struct pci_dev *pdev)
1796{
1797 struct net_device *netdev = pci_get_drvdata(pdev);
1798 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001799 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001800
 1801	/* flush_scheduled_work() may reschedule our watchdog task, so
 1802	 * explicitly disable the watchdog task from being rescheduled */
1803 set_bit(__IGB_DOWN, &adapter->state);
1804 del_timer_sync(&adapter->watchdog_timer);
1805 del_timer_sync(&adapter->phy_info_timer);
1806
1807 flush_scheduled_work();
1808
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001809#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001810 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001811 dev_info(&pdev->dev, "DCA disabled\n");
1812 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001813 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001814 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001815 }
1816#endif
1817
Auke Kok9d5c8242008-01-24 02:22:38 -08001818 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1819 * would have already happened in close and is redundant. */
1820 igb_release_hw_control(adapter);
1821
1822 unregister_netdev(netdev);
1823
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001824 if (!igb_check_reset_block(&adapter->hw))
1825 igb_reset_phy(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001826
Alexander Duyck047e0032009-10-27 15:49:27 +00001827 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001828
Alexander Duyck37680112009-02-19 20:40:30 -08001829#ifdef CONFIG_PCI_IOV
1830 /* reclaim resources allocated to VFs */
1831 if (adapter->vf_data) {
1832 /* disable iov and allow time for transactions to clear */
1833 pci_disable_sriov(pdev);
1834 msleep(500);
1835
1836 kfree(adapter->vf_data);
1837 adapter->vf_data = NULL;
1838 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1839 msleep(100);
1840 dev_info(&pdev->dev, "IOV Disabled\n");
1841 }
1842#endif
Alexander Duyck28b07592009-02-06 23:20:31 +00001843 iounmap(hw->hw_addr);
1844 if (hw->flash_address)
1845 iounmap(hw->flash_address);
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001846 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1847 IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001848
1849 free_netdev(netdev);
1850
Frans Pop19d5afd2009-10-02 10:04:12 -07001851 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001852
Auke Kok9d5c8242008-01-24 02:22:38 -08001853 pci_disable_device(pdev);
1854}
1855
1856/**
1857 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1858 * @adapter: board private structure to initialize
1859 *
1860 * igb_sw_init initializes the Adapter private data structure.
1861 * Fields are initialized based on PCI device information and
1862 * OS network device settings (MTU size).
1863 **/
1864static int __devinit igb_sw_init(struct igb_adapter *adapter)
1865{
1866 struct e1000_hw *hw = &adapter->hw;
1867 struct net_device *netdev = adapter->netdev;
1868 struct pci_dev *pdev = adapter->pdev;
1869
1870 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1871
Alexander Duyck68fd9912008-11-20 00:48:10 -08001872 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1873 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Auke Kok9d5c8242008-01-24 02:22:38 -08001874 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1875 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1876
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07001877 /* This call may decrease the number of queues depending on
1878 * interrupt mode. */
Alexander Duyck047e0032009-10-27 15:49:27 +00001879 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001880 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1881 return -ENOMEM;
1882 }
1883
1884 /* Explicitly disable IRQ since the NIC can be in any state. */
1885 igb_irq_disable(adapter);
1886
1887 set_bit(__IGB_DOWN, &adapter->state);
1888 return 0;
1889}
1890
1891/**
1892 * igb_open - Called when a network interface is made active
1893 * @netdev: network interface device structure
1894 *
1895 * Returns 0 on success, negative value on failure
1896 *
1897 * The open entry point is called when a network interface is made
1898 * active by the system (IFF_UP). At this point all resources needed
1899 * for transmit and receive operations are allocated, the interrupt
1900 * handler is registered with the OS, the watchdog timer is started,
1901 * and the stack is notified that the interface is ready.
1902 **/
1903static int igb_open(struct net_device *netdev)
1904{
1905 struct igb_adapter *adapter = netdev_priv(netdev);
1906 struct e1000_hw *hw = &adapter->hw;
1907 int err;
1908 int i;
1909
1910 /* disallow open during test */
1911 if (test_bit(__IGB_TESTING, &adapter->state))
1912 return -EBUSY;
1913
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001914 netif_carrier_off(netdev);
1915
Auke Kok9d5c8242008-01-24 02:22:38 -08001916 /* allocate transmit descriptors */
1917 err = igb_setup_all_tx_resources(adapter);
1918 if (err)
1919 goto err_setup_tx;
1920
1921 /* allocate receive descriptors */
1922 err = igb_setup_all_rx_resources(adapter);
1923 if (err)
1924 goto err_setup_rx;
1925
1926 /* e1000_power_up_phy(adapter); */
1927
1928 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1929 if ((adapter->hw.mng_cookie.status &
1930 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1931 igb_update_mng_vlan(adapter);
1932
1933 /* before we allocate an interrupt, we must be ready to handle it.
1934 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 1935	 * as soon as we call pci_request_irq, so we have to set up our
1936 * clean_rx handler before we do so. */
1937 igb_configure(adapter);
1938
Alexander Duycke1739522009-02-19 20:39:44 -08001939 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1940
Auke Kok9d5c8242008-01-24 02:22:38 -08001941 err = igb_request_irq(adapter);
1942 if (err)
1943 goto err_req_irq;
1944
1945 /* From here on the code is the same as igb_up() */
1946 clear_bit(__IGB_DOWN, &adapter->state);
1947
Alexander Duyck047e0032009-10-27 15:49:27 +00001948 for (i = 0; i < adapter->num_q_vectors; i++) {
1949 struct igb_q_vector *q_vector = adapter->q_vector[i];
1950 napi_enable(&q_vector->napi);
1951 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001952
1953 /* Clear any pending interrupts. */
1954 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001955
1956 igb_irq_enable(adapter);
1957
Alexander Duyckd4960302009-10-27 15:53:45 +00001958 /* notify VFs that reset has been completed */
1959 if (adapter->vfs_allocated_count) {
1960 u32 reg_data = rd32(E1000_CTRL_EXT);
1961 reg_data |= E1000_CTRL_EXT_PFRSTD;
1962 wr32(E1000_CTRL_EXT, reg_data);
1963 }
1964
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07001965 netif_tx_start_all_queues(netdev);
1966
Auke Kok9d5c8242008-01-24 02:22:38 -08001967 /* Fire a link status change interrupt to start the watchdog. */
1968 wr32(E1000_ICS, E1000_ICS_LSC);
1969
1970 return 0;
1971
1972err_req_irq:
1973 igb_release_hw_control(adapter);
1974 /* e1000_power_down_phy(adapter); */
1975 igb_free_all_rx_resources(adapter);
1976err_setup_rx:
1977 igb_free_all_tx_resources(adapter);
1978err_setup_tx:
1979 igb_reset(adapter);
1980
1981 return err;
1982}
1983
1984/**
1985 * igb_close - Disables a network interface
1986 * @netdev: network interface device structure
1987 *
1988 * Returns 0, this is not allowed to fail
1989 *
1990 * The close entry point is called when an interface is de-activated
1991 * by the OS. The hardware is still under the driver's control, but
1992 * needs to be disabled. A global MAC reset is issued to stop the
1993 * hardware, and all transmit and receive resources are freed.
1994 **/
1995static int igb_close(struct net_device *netdev)
1996{
1997 struct igb_adapter *adapter = netdev_priv(netdev);
1998
1999 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2000 igb_down(adapter);
2001
2002 igb_free_irq(adapter);
2003
2004 igb_free_all_tx_resources(adapter);
2005 igb_free_all_rx_resources(adapter);
2006
2007 /* kill manageability vlan ID if supported, but not if a vlan with
2008 * the same ID is registered on the host OS (let 8021q kill it) */
2009 if ((adapter->hw.mng_cookie.status &
2010 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2011 !(adapter->vlgrp &&
2012 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
2013 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2014
2015 return 0;
2016}
2017
2018/**
2019 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002020 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2021 *
2022 * Return 0 on success, negative on failure
2023 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002024int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002025{
Alexander Duyck80785292009-10-27 15:51:47 +00002026 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002027 int size;
2028
2029 size = sizeof(struct igb_buffer) * tx_ring->count;
2030 tx_ring->buffer_info = vmalloc(size);
2031 if (!tx_ring->buffer_info)
2032 goto err;
2033 memset(tx_ring->buffer_info, 0, size);
2034
2035 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002036 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002037 tx_ring->size = ALIGN(tx_ring->size, 4096);
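	/* e.g. assuming the 256-descriptor default, at 16 bytes per
	 * advanced Tx descriptor the ring is exactly 4096 bytes, so
	 * ALIGN() leaves it unchanged; other counts are padded up to the
	 * next 4K boundary */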
2038
2039 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2040 &tx_ring->dma);
2041
2042 if (!tx_ring->desc)
2043 goto err;
2044
Auke Kok9d5c8242008-01-24 02:22:38 -08002045 tx_ring->next_to_use = 0;
2046 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002047 return 0;
2048
2049err:
2050 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00002051 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002052 "Unable to allocate memory for the transmit descriptor ring\n");
2053 return -ENOMEM;
2054}
2055
2056/**
2057 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2058 * (Descriptors) for all queues
2059 * @adapter: board private structure
2060 *
2061 * Return 0 on success, negative on failure
2062 **/
2063static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2064{
2065 int i, err = 0;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07002066 int r_idx;
Auke Kok9d5c8242008-01-24 02:22:38 -08002067
2068 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck80785292009-10-27 15:51:47 +00002069 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002070 if (err) {
2071 dev_err(&adapter->pdev->dev,
2072 "Allocation for Tx Queue %u failed\n", i);
2073 for (i--; i >= 0; i--)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002074 igb_free_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002075 break;
2076 }
2077 }
2078
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07002079 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
2080 r_idx = i % adapter->num_tx_queues;
2081 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002082 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002083 return err;
2084}
2085
2086/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002087 * igb_setup_tctl - configure the transmit control registers
2088 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002089 **/
Alexander Duyck85b430b2009-10-27 15:50:29 +00002090static void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002091{
Auke Kok9d5c8242008-01-24 02:22:38 -08002092 struct e1000_hw *hw = &adapter->hw;
2093 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002094
Alexander Duyck85b430b2009-10-27 15:50:29 +00002095 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2096 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002097
2098 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002099 tctl = rd32(E1000_TCTL);
2100 tctl &= ~E1000_TCTL_CT;
2101 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2102 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2103
2104 igb_config_collision_dist(hw);
2105
Auke Kok9d5c8242008-01-24 02:22:38 -08002106 /* Enable transmits */
2107 tctl |= E1000_TCTL_EN;
2108
2109 wr32(E1000_TCTL, tctl);
2110}
2111
2112/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002113 * igb_configure_tx_ring - Configure transmit ring after Reset
2114 * @adapter: board private structure
2115 * @ring: tx ring to configure
2116 *
2117 * Configure a transmit ring after a reset.
2118 **/
2119static void igb_configure_tx_ring(struct igb_adapter *adapter,
2120 struct igb_ring *ring)
2121{
2122 struct e1000_hw *hw = &adapter->hw;
2123 u32 txdctl;
2124 u64 tdba = ring->dma;
2125 int reg_idx = ring->reg_idx;
2126
2127 /* disable the queue */
2128 txdctl = rd32(E1000_TXDCTL(reg_idx));
2129 wr32(E1000_TXDCTL(reg_idx),
2130 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2131 wrfl();
2132 mdelay(10);
2133
2134 wr32(E1000_TDLEN(reg_idx),
2135 ring->count * sizeof(union e1000_adv_tx_desc));
2136 wr32(E1000_TDBAL(reg_idx),
2137 tdba & 0x00000000ffffffffULL);
2138 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2139
Alexander Duyckfce99e32009-10-27 15:51:27 +00002140 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2141 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2142 writel(0, ring->head);
2143 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002144
2145 txdctl |= IGB_TX_PTHRESH;
2146 txdctl |= IGB_TX_HTHRESH << 8;
2147 txdctl |= IGB_TX_WTHRESH << 16;
2148
2149 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2150 wr32(E1000_TXDCTL(reg_idx), txdctl);
2151}
2152
2153/**
2154 * igb_configure_tx - Configure transmit Unit after Reset
2155 * @adapter: board private structure
2156 *
2157 * Configure the Tx unit of the MAC after a reset.
2158 **/
2159static void igb_configure_tx(struct igb_adapter *adapter)
2160{
2161 int i;
2162
2163 for (i = 0; i < adapter->num_tx_queues; i++)
2164 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002165}
2166
2167/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002168 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002169 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2170 *
2171 * Returns 0 on success, negative on failure
2172 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002173int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002174{
Alexander Duyck80785292009-10-27 15:51:47 +00002175 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002176 int size, desc_len;
2177
2178 size = sizeof(struct igb_buffer) * rx_ring->count;
2179 rx_ring->buffer_info = vmalloc(size);
2180 if (!rx_ring->buffer_info)
2181 goto err;
2182 memset(rx_ring->buffer_info, 0, size);
2183
2184 desc_len = sizeof(union e1000_adv_rx_desc);
2185
2186 /* Round up to nearest 4K */
2187 rx_ring->size = rx_ring->count * desc_len;
2188 rx_ring->size = ALIGN(rx_ring->size, 4096);
2189
2190 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2191 &rx_ring->dma);
2192
2193 if (!rx_ring->desc)
2194 goto err;
2195
2196 rx_ring->next_to_clean = 0;
2197 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002198
Auke Kok9d5c8242008-01-24 02:22:38 -08002199 return 0;
2200
2201err:
2202 vfree(rx_ring->buffer_info);
Alexander Duyck80785292009-10-27 15:51:47 +00002203 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002204 "the receive descriptor ring\n");
2205 return -ENOMEM;
2206}
2207
2208/**
2209 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2210 * (Descriptors) for all queues
2211 * @adapter: board private structure
2212 *
2213 * Return 0 on success, negative on failure
2214 **/
2215static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2216{
2217 int i, err = 0;
2218
2219 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck80785292009-10-27 15:51:47 +00002220 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002221 if (err) {
2222 dev_err(&adapter->pdev->dev,
2223 "Allocation for Rx Queue %u failed\n", i);
2224 for (i--; i >= 0; i--)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002225 igb_free_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002226 break;
2227 }
2228 }
2229
2230 return err;
2231}
2232
2233/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002234 * igb_setup_mrqc - configure the multiple receive queue control registers
2235 * @adapter: Board private structure
2236 **/
2237static void igb_setup_mrqc(struct igb_adapter *adapter)
2238{
2239 struct e1000_hw *hw = &adapter->hw;
2240 u32 mrqc, rxcsum;
2241 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2242 union e1000_reta {
2243 u32 dword;
2244 u8 bytes[4];
2245 } reta;
2246 static const u8 rsshash[40] = {
2247 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2248 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2249 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2250 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2251
2252 /* Fill out hash function seeds */
2253 for (j = 0; j < 10; j++) {
2254 u32 rsskey = rsshash[(j * 4)];
2255 rsskey |= rsshash[(j * 4) + 1] << 8;
2256 rsskey |= rsshash[(j * 4) + 2] << 16;
2257 rsskey |= rsshash[(j * 4) + 3] << 24;
2258 array_wr32(E1000_RSSRK(0), j, rsskey);
2259 }
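	/* illustration: the key bytes are packed little-endian into each
	 * 32-bit register, so the first RSSRK word written above is
	 * rsshash[3..0] == 0xda565a6d */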
2260
2261 num_rx_queues = adapter->num_rx_queues;
2262
2263 if (adapter->vfs_allocated_count) {
2264 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2265 switch (hw->mac.type) {
2266 case e1000_82576:
2267 shift = 3;
2268 num_rx_queues = 2;
2269 break;
2270 case e1000_82575:
2271 shift = 2;
2272 shift2 = 6;
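			/* fall through */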
2273 default:
2274 break;
2275 }
2276 } else {
2277 if (hw->mac.type == e1000_82575)
2278 shift = 6;
2279 }
2280
2281 for (j = 0; j < (32 * 4); j++) {
2282 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2283 if (shift2)
2284 reta.bytes[j & 3] |= num_rx_queues << shift2;
2285 if ((j & 3) == 3)
2286 wr32(E1000_RETA(j >> 2), reta.dword);
2287 }
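	/* Example (assuming 4 Rx queues on 82576 with no VFs, so
	 * shift == 0): the 128 redirection table entries simply repeat
	 * 0,1,2,3, spreading RSS hash buckets evenly across the queues;
	 * on 82575 the same queue index lands in bits 7:6 instead
	 * (shift == 6). */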
2288
2289 /*
2290 * Disable raw packet checksumming so that RSS hash is placed in
2291 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2292 * offloads as they are enabled by default
2293 */
2294 rxcsum = rd32(E1000_RXCSUM);
2295 rxcsum |= E1000_RXCSUM_PCSD;
2296
2297 if (adapter->hw.mac.type >= e1000_82576)
2298 /* Enable Receive Checksum Offload for SCTP */
2299 rxcsum |= E1000_RXCSUM_CRCOFL;
2300
2301 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2302 wr32(E1000_RXCSUM, rxcsum);
2303
2304 /* If VMDq is enabled then we set the appropriate mode for that, else
2305 * we default to RSS so that an RSS hash is calculated per packet even
2306 * if we are only using one queue */
2307 if (adapter->vfs_allocated_count) {
2308 if (hw->mac.type > e1000_82575) {
2309 /* Set the default pool for the PF's first queue */
2310 u32 vtctl = rd32(E1000_VT_CTL);
2311 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2312 E1000_VT_CTL_DISABLE_DEF_POOL);
2313 vtctl |= adapter->vfs_allocated_count <<
2314 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2315 wr32(E1000_VT_CTL, vtctl);
2316 }
2317 if (adapter->num_rx_queues > 1)
2318 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2319 else
2320 mrqc = E1000_MRQC_ENABLE_VMDQ;
2321 } else {
2322 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2323 }
2324 igb_vmm_control(adapter);
2325
2326 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2327 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2328 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2329 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2330 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2331 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2332 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2333 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2334
2335 wr32(E1000_MRQC, mrqc);
2336}
2337
2338/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002339 * igb_setup_rctl - configure the receive control registers
2340 * @adapter: Board private structure
2341 **/
2342static void igb_setup_rctl(struct igb_adapter *adapter)
2343{
2344 struct e1000_hw *hw = &adapter->hw;
2345 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002346
2347 rctl = rd32(E1000_RCTL);
2348
2349 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002350 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002351
Alexander Duyck69d728b2008-11-25 01:04:03 -08002352 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002353 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002354
Auke Kok87cb7e82008-07-08 15:08:29 -07002355 /*
2356 * enable stripping of CRC. It's unlikely this will break BMC
2357 * redirection as it did with e1000. Newer features require
2358 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002359 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002360 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002361
Alexander Duyck9b07f3d32008-11-25 01:03:26 -08002362 /*
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002363 * disable store bad packets and clear size bits.
Alexander Duyck9b07f3d32008-11-25 01:03:26 -08002364 */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002365 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002366
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002367 /* enable LPE to prevent packets larger than max_frame_size */
2368 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002369
Alexander Duyck952f72a2009-10-27 15:51:07 +00002370 /* disable queue 0 to prevent tail write w/o re-config */
2371 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002372
Alexander Duycke1739522009-02-19 20:39:44 -08002373	/* Attention: for SR-IOV PF driver operation, queue drop must be
 2374	 * enabled for all VF and PF queues to prevent head-of-line blocking
 2375	 * if an untrusted VF does not provide descriptors to hardware.
2376 */
2377 if (adapter->vfs_allocated_count) {
2378 u32 vmolr;
2379
Alexander Duycke1739522009-02-19 20:39:44 -08002380 /* set all queue drop enable bits */
2381 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002382
Alexander Duyck77a22942009-05-06 16:43:48 -07002383 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
Alexander Duycke1739522009-02-19 20:39:44 -08002384 if (rctl & E1000_RCTL_LPE)
2385 vmolr |= E1000_VMOLR_LPE;
Alexander Duyck77a22942009-05-06 16:43:48 -07002386 if (adapter->num_rx_queues > 1)
Alexander Duycke1739522009-02-19 20:39:44 -08002387 vmolr |= E1000_VMOLR_RSSE;
Alexander Duyck77a22942009-05-06 16:43:48 -07002388 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
Alexander Duycke1739522009-02-19 20:39:44 -08002389 }
2390
Auke Kok9d5c8242008-01-24 02:22:38 -08002391 wr32(E1000_RCTL, rctl);
2392}
2393
2394/**
Alexander Duycke1739522009-02-19 20:39:44 -08002395 * igb_rlpml_set - set maximum receive packet size
2396 * @adapter: board private structure
2397 *
2398 * Configure maximum receivable packet size.
2399 **/
2400static void igb_rlpml_set(struct igb_adapter *adapter)
2401{
2402 u32 max_frame_size = adapter->max_frame_size;
2403 struct e1000_hw *hw = &adapter->hw;
2404 u16 pf_id = adapter->vfs_allocated_count;
2405
2406 if (adapter->vlgrp)
2407 max_frame_size += VLAN_TAG_SIZE;
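	/* e.g. a 1500-byte MTU yields 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4)
	 * = 1518 bytes, or 1522 once the 4-byte VLAN tag is added */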
2408
2409 /* if vfs are enabled we set RLPML to the largest possible request
2410 * size and set the VMOLR RLPML to the size we need */
2411 if (pf_id) {
2412 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2413 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
2414 }
2415
2416 wr32(E1000_RLPML, max_frame_size);
2417}
2418
2419/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002420 * igb_configure_rx_ring - Configure a receive ring after Reset
2421 * @adapter: board private structure
2422 * @ring: receive ring to be configured
2423 *
2424 * Configure the Rx unit of the MAC after a reset.
2425 **/
2426static void igb_configure_rx_ring(struct igb_adapter *adapter,
2427 struct igb_ring *ring)
2428{
2429 struct e1000_hw *hw = &adapter->hw;
2430 u64 rdba = ring->dma;
2431 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002432 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002433
2434 /* disable the queue */
2435 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2436 wr32(E1000_RXDCTL(reg_idx),
2437 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2438
2439 /* Set DMA base address registers */
2440 wr32(E1000_RDBAL(reg_idx),
2441 rdba & 0x00000000ffffffffULL);
2442 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2443 wr32(E1000_RDLEN(reg_idx),
2444 ring->count * sizeof(union e1000_adv_rx_desc));
2445
2446 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002447 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2448 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2449 writel(0, ring->head);
2450 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002451
Alexander Duyck952f72a2009-10-27 15:51:07 +00002452 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002453 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2454 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002455 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2456#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2457 srrctl |= IGB_RXBUFFER_16384 >>
2458 E1000_SRRCTL_BSIZEPKT_SHIFT;
2459#else
2460 srrctl |= (PAGE_SIZE / 2) >>
2461 E1000_SRRCTL_BSIZEPKT_SHIFT;
2462#endif
2463 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2464 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002465 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002466 E1000_SRRCTL_BSIZEPKT_SHIFT;
2467 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2468 }
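	/* i.e. buffers smaller than 1KB use header-split descriptors with
	 * the packet portion capped at half a page, while larger buffers
	 * use the simpler one-buffer descriptor layout */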
2469
2470 wr32(E1000_SRRCTL(reg_idx), srrctl);
2471
Alexander Duyck85b430b2009-10-27 15:50:29 +00002472 /* enable receive descriptor fetching */
2473 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2474 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2475 rxdctl &= 0xFFF00000;
2476 rxdctl |= IGB_RX_PTHRESH;
2477 rxdctl |= IGB_RX_HTHRESH << 8;
2478 rxdctl |= IGB_RX_WTHRESH << 16;
2479 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2480}
2481
2482/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002483 * igb_configure_rx - Configure receive Unit after Reset
2484 * @adapter: board private structure
2485 *
2486 * Configure the Rx unit of the MAC after a reset.
2487 **/
2488static void igb_configure_rx(struct igb_adapter *adapter)
2489{
Hannes Eder91075842009-02-18 19:36:04 -08002490 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002491
Alexander Duyck68d480c2009-10-05 06:33:08 +00002492 /* set UTA to appropriate mode */
2493 igb_set_uta(adapter);
2494
Alexander Duyck26ad9172009-10-05 06:32:49 +00002495 /* set the correct pool for the PF default MAC address in entry 0 */
2496 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2497 adapter->vfs_allocated_count);
2498
Alexander Duyck06cf2662009-10-27 15:53:25 +00002499 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2500 * the Base and Length of the Rx Descriptor Ring */
2501 for (i = 0; i < adapter->num_rx_queues; i++)
2502 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002503}
2504
2505/**
2506 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002507 * @tx_ring: Tx descriptor ring for a specific queue
2508 *
2509 * Free all transmit software resources
2510 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002511void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002512{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002513 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002514
2515 vfree(tx_ring->buffer_info);
2516 tx_ring->buffer_info = NULL;
2517
Alexander Duyck80785292009-10-27 15:51:47 +00002518 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2519 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002520
2521 tx_ring->desc = NULL;
2522}
2523
2524/**
2525 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2526 * @adapter: board private structure
2527 *
2528 * Free all transmit software resources
2529 **/
2530static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2531{
2532 int i;
2533
2534 for (i = 0; i < adapter->num_tx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002535 igb_free_tx_resources(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002536}
2537
Alexander Duyck80785292009-10-27 15:51:47 +00002538static void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08002539 struct igb_buffer *buffer_info)
2540{
Alexander Duyck65689fe2009-03-20 00:17:43 +00002541 buffer_info->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002542 if (buffer_info->skb) {
Alexander Duyck80785292009-10-27 15:51:47 +00002543 skb_dma_unmap(&tx_ring->pdev->dev,
2544 buffer_info->skb,
Alexander Duyck65689fe2009-03-20 00:17:43 +00002545 DMA_TO_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002546 dev_kfree_skb_any(buffer_info->skb);
2547 buffer_info->skb = NULL;
2548 }
2549 buffer_info->time_stamp = 0;
2550 /* buffer_info must be completely set up in the transmit path */
2551}
2552
2553/**
2554 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002555 * @tx_ring: ring to be cleaned
2556 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002557static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002558{
2559 struct igb_buffer *buffer_info;
2560 unsigned long size;
2561 unsigned int i;
2562
2563 if (!tx_ring->buffer_info)
2564 return;
2565 /* Free all the Tx ring sk_buffs */
2566
2567 for (i = 0; i < tx_ring->count; i++) {
2568 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002569 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002570 }
2571
2572 size = sizeof(struct igb_buffer) * tx_ring->count;
2573 memset(tx_ring->buffer_info, 0, size);
2574
2575 /* Zero out the descriptor ring */
2576
2577 memset(tx_ring->desc, 0, tx_ring->size);
2578
2579 tx_ring->next_to_use = 0;
2580 tx_ring->next_to_clean = 0;
2581
Alexander Duyckfce99e32009-10-27 15:51:27 +00002582 writel(0, tx_ring->head);
2583 writel(0, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08002584}
2585
2586/**
2587 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2588 * @adapter: board private structure
2589 **/
2590static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2591{
2592 int i;
2593
2594 for (i = 0; i < adapter->num_tx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002595 igb_clean_tx_ring(&adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002596}
2597
2598/**
2599 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002600 * @rx_ring: ring to clean the resources from
2601 *
2602 * Free all receive software resources
2603 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002604void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002605{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002606 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002607
2608 vfree(rx_ring->buffer_info);
2609 rx_ring->buffer_info = NULL;
2610
Alexander Duyck80785292009-10-27 15:51:47 +00002611 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2612 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002613
2614 rx_ring->desc = NULL;
2615}
2616
2617/**
2618 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2619 * @adapter: board private structure
2620 *
2621 * Free all receive software resources
2622 **/
2623static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2624{
2625 int i;
2626
2627 for (i = 0; i < adapter->num_rx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002628 igb_free_rx_resources(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002629}
2630
2631/**
2632 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002633 * @rx_ring: ring to free buffers from
2634 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002635static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002636{
2637 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002638 unsigned long size;
2639 unsigned int i;
2640
2641 if (!rx_ring->buffer_info)
2642 return;
2643 /* Free all the Rx ring sk_buffs */
2644 for (i = 0; i < rx_ring->count; i++) {
2645 buffer_info = &rx_ring->buffer_info[i];
2646 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002647 pci_unmap_single(rx_ring->pdev,
2648 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002649 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002650 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002651 buffer_info->dma = 0;
2652 }
2653
2654 if (buffer_info->skb) {
2655 dev_kfree_skb(buffer_info->skb);
2656 buffer_info->skb = NULL;
2657 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002658 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002659 pci_unmap_page(rx_ring->pdev,
2660 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002661 PAGE_SIZE / 2,
2662 PCI_DMA_FROMDEVICE);
2663 buffer_info->page_dma = 0;
2664 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002665 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002666 put_page(buffer_info->page);
2667 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002668 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002669 }
2670 }
2671
Auke Kok9d5c8242008-01-24 02:22:38 -08002672 size = sizeof(struct igb_buffer) * rx_ring->count;
2673 memset(rx_ring->buffer_info, 0, size);
2674
2675 /* Zero out the descriptor ring */
2676 memset(rx_ring->desc, 0, rx_ring->size);
2677
2678 rx_ring->next_to_clean = 0;
2679 rx_ring->next_to_use = 0;
2680
Alexander Duyckfce99e32009-10-27 15:51:27 +00002681 writel(0, rx_ring->head);
2682 writel(0, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08002683}
2684
2685/**
2686 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2687 * @adapter: board private structure
2688 **/
2689static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2690{
2691 int i;
2692
2693 for (i = 0; i < adapter->num_rx_queues; i++)
Mitch Williams3b644cf2008-06-27 10:59:48 -07002694 igb_clean_rx_ring(&adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002695}
2696
2697/**
2698 * igb_set_mac - Change the Ethernet Address of the NIC
2699 * @netdev: network interface device structure
2700 * @p: pointer to an address structure
2701 *
2702 * Returns 0 on success, negative on failure
2703 **/
2704static int igb_set_mac(struct net_device *netdev, void *p)
2705{
2706 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002707 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002708 struct sockaddr *addr = p;
2709
2710 if (!is_valid_ether_addr(addr->sa_data))
2711 return -EADDRNOTAVAIL;
2712
2713 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002714 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002715
Alexander Duyck26ad9172009-10-05 06:32:49 +00002716 /* set the correct pool for the new PF MAC address in entry 0 */
2717 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2718 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002719
Auke Kok9d5c8242008-01-24 02:22:38 -08002720 return 0;
2721}
2722
2723/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002724 * igb_write_mc_addr_list - write multicast addresses to MTA
2725 * @netdev: network interface device structure
2726 *
2727 * Writes multicast address list to the MTA hash table.
2728 * Returns: -ENOMEM on failure
2729 * 0 on no addresses written
2730 * X on writing X addresses to MTA
2731 **/
2732static int igb_write_mc_addr_list(struct net_device *netdev)
2733{
2734 struct igb_adapter *adapter = netdev_priv(netdev);
2735 struct e1000_hw *hw = &adapter->hw;
2736 struct dev_mc_list *mc_ptr = netdev->mc_list;
2737 u8 *mta_list;
2738 u32 vmolr = 0;
2739 int i;
2740
2741 if (!netdev->mc_count) {
2742 /* nothing to program, so clear mc list */
2743 igb_update_mc_addr_list(hw, NULL, 0);
2744 igb_restore_vf_multicasts(adapter);
2745 return 0;
2746 }
2747
 2748	mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2749 if (!mta_list)
2750 return -ENOMEM;
2751
2752 /* set vmolr receive overflow multicast bit */
2753 vmolr |= E1000_VMOLR_ROMPE;
2754
2755 /* The shared function expects a packed array of only addresses. */
2756 mc_ptr = netdev->mc_list;
2757
2758 for (i = 0; i < netdev->mc_count; i++) {
2759 if (!mc_ptr)
2760 break;
2761 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2762 mc_ptr = mc_ptr->next;
2763 }
2764 igb_update_mc_addr_list(hw, mta_list, i);
2765 kfree(mta_list);
2766
2767 return netdev->mc_count;
2768}
2769
2770/**
2771 * igb_write_uc_addr_list - write unicast addresses to RAR table
2772 * @netdev: network interface device structure
2773 *
2774 * Writes unicast address list to the RAR table.
2775 * Returns: -ENOMEM on failure/insufficient address space
2776 * 0 on no addresses written
2777 * X on writing X addresses to the RAR table
2778 **/
2779static int igb_write_uc_addr_list(struct net_device *netdev)
2780{
2781 struct igb_adapter *adapter = netdev_priv(netdev);
2782 struct e1000_hw *hw = &adapter->hw;
2783 unsigned int vfn = adapter->vfs_allocated_count;
2784 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2785 int count = 0;
2786
2787 /* return ENOMEM indicating insufficient memory for addresses */
2788 if (netdev->uc.count > rar_entries)
2789 return -ENOMEM;
2790
2791 if (netdev->uc.count && rar_entries) {
2792 struct netdev_hw_addr *ha;
2793 list_for_each_entry(ha, &netdev->uc.list, list) {
2794 if (!rar_entries)
2795 break;
2796 igb_rar_set_qsel(adapter, ha->addr,
2797 rar_entries--,
2798 vfn);
2799 count++;
2800 }
2801 }
2802 /* write the addresses in reverse order to avoid write combining */
2803 for (; rar_entries > 0 ; rar_entries--) {
2804 wr32(E1000_RAH(rar_entries), 0);
2805 wr32(E1000_RAL(rar_entries), 0);
2806 }
2807 wrfl();
2808
2809 return count;
2810}
2811
2812/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002813 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08002814 * @netdev: network interface device structure
2815 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002816 * The set_rx_mode entry point is called whenever the unicast or multicast
2817 * address lists or the network interface flags are updated. This routine is
2818 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08002819 * promiscuous mode, and all-multi behavior.
2820 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002821static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002822{
2823 struct igb_adapter *adapter = netdev_priv(netdev);
2824 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002825 unsigned int vfn = adapter->vfs_allocated_count;
2826 u32 rctl, vmolr = 0;
2827 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08002828
2829 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08002830 rctl = rd32(E1000_RCTL);
2831
Alexander Duyck68d480c2009-10-05 06:33:08 +00002832	/* clear the affected bits */
2833 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2834
Patrick McHardy746b9f02008-07-16 20:15:45 -07002835 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002836 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002837 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002838 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002839 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002840 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002841 vmolr |= E1000_VMOLR_MPME;
2842 } else {
2843 /*
2844			 * Write addresses to the MTA; if the attempt fails
2845			 * then we should just turn on promiscuous mode so
2846 * that we can at least receive multicast traffic
2847 */
2848 count = igb_write_mc_addr_list(netdev);
2849 if (count < 0) {
2850 rctl |= E1000_RCTL_MPE;
2851 vmolr |= E1000_VMOLR_MPME;
2852 } else if (count) {
2853 vmolr |= E1000_VMOLR_ROMPE;
2854 }
2855 }
2856 /*
2857		 * Write addresses to available RAR registers; if there is not
2858		 * sufficient space to store all the addresses then enable
2859		 * unicast promiscuous mode
2860 */
2861 count = igb_write_uc_addr_list(netdev);
2862 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002863 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002864 vmolr |= E1000_VMOLR_ROPE;
2865 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07002866 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07002867 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002868 wr32(E1000_RCTL, rctl);
2869
Alexander Duyck68d480c2009-10-05 06:33:08 +00002870 /*
2871 * In order to support SR-IOV and eventually VMDq it is necessary to set
2872 * the VMOLR to enable the appropriate modes. Without this workaround
2873 * we will have issues with VLAN tag stripping not being done for frames
2874	 * that arrive only because we are the default pool
2875 */
2876 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00002877 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00002878
Alexander Duyck68d480c2009-10-05 06:33:08 +00002879 vmolr |= rd32(E1000_VMOLR(vfn)) &
2880 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2881 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00002882 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002883}
2884
2885/* Need to wait a few seconds after link up to get diagnostic information from
2886 * the phy */
2887static void igb_update_phy_info(unsigned long data)
2888{
2889 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002890 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002891}
2892
2893/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00002894 * igb_has_link - check shared code for link and determine up/down
2895 * @adapter: pointer to driver private info
2896 **/
2897static bool igb_has_link(struct igb_adapter *adapter)
2898{
2899 struct e1000_hw *hw = &adapter->hw;
2900 bool link_active = false;
2901 s32 ret_val = 0;
2902
2903 /* get_link_status is set on LSC (link status) interrupt or
2904	 * rx sequence error interrupt. get_link_status will stay
2905	 * set until e1000_check_for_link establishes link
2906 * for copper adapters ONLY
2907 */
2908 switch (hw->phy.media_type) {
2909 case e1000_media_type_copper:
2910 if (hw->mac.get_link_status) {
2911 ret_val = hw->mac.ops.check_for_link(hw);
2912 link_active = !hw->mac.get_link_status;
2913 } else {
2914 link_active = true;
2915 }
2916 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00002917 case e1000_media_type_internal_serdes:
2918 ret_val = hw->mac.ops.check_for_link(hw);
2919 link_active = hw->mac.serdes_has_link;
2920 break;
2921 default:
2922 case e1000_media_type_unknown:
2923 break;
2924 }
2925
2926 return link_active;
2927}
2928
2929/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002930 * igb_watchdog - Timer Call-back
2931 * @data: pointer to adapter cast into an unsigned long
2932 **/
2933static void igb_watchdog(unsigned long data)
2934{
2935 struct igb_adapter *adapter = (struct igb_adapter *)data;
2936 /* Do the rest outside of interrupt context */
2937 schedule_work(&adapter->watchdog_task);
2938}
2939
2940static void igb_watchdog_task(struct work_struct *work)
2941{
2942 struct igb_adapter *adapter = container_of(work,
2943 struct igb_adapter, watchdog_task);
2944 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002945 struct net_device *netdev = adapter->netdev;
2946 struct igb_ring *tx_ring = adapter->tx_ring;
Auke Kok9d5c8242008-01-24 02:22:38 -08002947 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07002948 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002949
Alexander Duyck4d6b7252009-02-06 23:16:24 +00002950 link = igb_has_link(adapter);
2951 if ((netif_carrier_ok(netdev)) && link)
Auke Kok9d5c8242008-01-24 02:22:38 -08002952 goto link_up;
2953
Auke Kok9d5c8242008-01-24 02:22:38 -08002954 if (link) {
2955 if (!netif_carrier_ok(netdev)) {
2956 u32 ctrl;
2957 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2958 &adapter->link_speed,
2959 &adapter->link_duplex);
2960
2961 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08002962			/* Link status message must follow this format */
2963 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08002964 "Flow Control: %s\n",
Alexander Duyck527d47c2008-11-27 00:21:39 -08002965 netdev->name,
Auke Kok9d5c8242008-01-24 02:22:38 -08002966 adapter->link_speed,
2967 adapter->link_duplex == FULL_DUPLEX ?
2968 "Full Duplex" : "Half Duplex",
2969 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2970 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2971 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2972 E1000_CTRL_TFCE) ? "TX" : "None")));
2973
2974 /* tweak tx_queue_len according to speed/duplex and
2975 * adjust the timeout factor */
2976 netdev->tx_queue_len = adapter->tx_queue_len;
2977 adapter->tx_timeout_factor = 1;
2978 switch (adapter->link_speed) {
2979 case SPEED_10:
2980 netdev->tx_queue_len = 10;
2981 adapter->tx_timeout_factor = 14;
2982 break;
2983 case SPEED_100:
2984 netdev->tx_queue_len = 100;
2985 /* maybe add some timeout factor ? */
2986 break;
2987 }
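			/* worked example (wire math, not from this file): a
			 * 1514-byte frame occupies the wire for ~1.2 ms at
			 * 10 Mbps, roughly 100x longer than at gigabit,
			 * hence the 14x hang-detection factor and the much
			 * shorter qdisc queue above */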
2988
2989 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002990
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002991 igb_ping_all_vfs(adapter);
2992
Alexander Duyck4b1a9872009-02-06 23:19:50 +00002993 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08002994 if (!test_bit(__IGB_DOWN, &adapter->state))
2995 mod_timer(&adapter->phy_info_timer,
2996 round_jiffies(jiffies + 2 * HZ));
2997 }
2998 } else {
2999 if (netif_carrier_ok(netdev)) {
3000 adapter->link_speed = 0;
3001 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003002			/* Link status message must follow this format */
3003 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3004 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003005 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003006
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003007 igb_ping_all_vfs(adapter);
3008
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003009 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003010 if (!test_bit(__IGB_DOWN, &adapter->state))
3011 mod_timer(&adapter->phy_info_timer,
3012 round_jiffies(jiffies + 2 * HZ));
3013 }
3014 }
3015
3016link_up:
3017 igb_update_stats(adapter);
3018
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003019 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Auke Kok9d5c8242008-01-24 02:22:38 -08003020 adapter->tpt_old = adapter->stats.tpt;
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003021 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
Auke Kok9d5c8242008-01-24 02:22:38 -08003022 adapter->colc_old = adapter->stats.colc;
3023
3024 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
3025 adapter->gorc_old = adapter->stats.gorc;
3026 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
3027 adapter->gotc_old = adapter->stats.gotc;
3028
3029 igb_update_adaptive(&adapter->hw);
3030
3031 if (!netif_carrier_ok(netdev)) {
Alexander Duyckc493ea42009-03-20 00:16:50 +00003032 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003033 /* We've lost link, so the controller stops DMA,
3034 * but we've got queued Tx work that's never going
3035 * to get done, so reset controller to flush Tx.
3036 * (Do the reset outside of interrupt context). */
3037 adapter->tx_timeout_count++;
3038 schedule_work(&adapter->reset_task);
Jesse Brandeburgc2d5ab42009-05-07 11:07:35 +00003039 /* return immediately since reset is imminent */
3040 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003041 }
3042 }
3043
3044 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003045 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003046 u32 eics = 0;
3047 for (i = 0; i < adapter->num_q_vectors; i++) {
3048 struct igb_q_vector *q_vector = adapter->q_vector[i];
3049 eics |= q_vector->eims_value;
3050 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003051 wr32(E1000_EICS, eics);
3052 } else {
3053 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3054 }
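	/* either write fires a self-induced interrupt: each bit set in EICS
	 * raises the corresponding MSI-X vector, and ICS_RXDMT0 raises the
	 * single legacy/MSI interrupt, so every queue gets polled at least
	 * once per watchdog tick */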
Auke Kok9d5c8242008-01-24 02:22:38 -08003055
3056 /* Force detection of hung controller every watchdog period */
3057 tx_ring->detect_tx_hung = true;
3058
3059 /* Reset the timer */
3060 if (!test_bit(__IGB_DOWN, &adapter->state))
3061 mod_timer(&adapter->watchdog_timer,
3062 round_jiffies(jiffies + 2 * HZ));
3063}
3064
3065enum latency_range {
3066 lowest_latency = 0,
3067 low_latency = 1,
3068 bulk_latency = 2,
3069 latency_invalid = 255
3070};
3071
3072
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003073/**
3074 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3075 *
3076 * Stores a new ITR value based strictly on packet size. This
3077 * algorithm is less sophisticated than that used in igb_update_itr,
3078 * due to the difficulty of synchronizing statistics across multiple
3079 * receive rings. The divisors and thresholds used by this function
3080 * were determined based on theoretical maximum wire speed and testing
3081 * data, in order to minimize response time while increasing bulk
3082 * throughput.
3083 * This functionality is controlled by the InterruptThrottleRate module
3084 * parameter (see igb_param.c)
3085 * NOTE: This function is called only when operating in a multiqueue
3086 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003087 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003088 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003089static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003090{
Alexander Duyck047e0032009-10-27 15:49:27 +00003091 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003092 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003093 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -08003094
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003095 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3096	 * ints/sec - an ITR value of 976 (roughly 250 usecs).
3097 */
3098 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003099 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003100 goto set_itr_val;
3101 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003102
3103 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3104 struct igb_ring *ring = q_vector->rx_ring;
3105 avg_wire_size = ring->total_bytes / ring->total_packets;
3106 }
3107
3108 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3109 struct igb_ring *ring = q_vector->tx_ring;
3110 avg_wire_size = max_t(u32, avg_wire_size,
3111 (ring->total_bytes /
3112 ring->total_packets));
3113 }
3114
3115 /* if avg_wire_size isn't set no work was done */
3116 if (!avg_wire_size)
3117 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003118
3119 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3120 avg_wire_size += 24;
3121
3122 /* Don't starve jumbo frames */
3123 avg_wire_size = min(avg_wire_size, 3000);
3124
3125 /* Give a little boost to mid-size frames */
3126 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3127 new_val = avg_wire_size / 3;
3128 else
3129 new_val = avg_wire_size / 2;
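	/* worked example: 1500-byte frames average 1500 + 24 = 1524 bytes
	 * on the wire, outside the 300-1200 window, so new_val = 1524 / 2 =
	 * 762; minimum-sized 60-byte frames average 84 and yield
	 * new_val = 42, i.e. a much higher interrupt rate for small packets */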
3130
3131set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003132 if (new_val != q_vector->itr_val) {
3133 q_vector->itr_val = new_val;
3134 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003135 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003136clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003137 if (q_vector->rx_ring) {
3138 q_vector->rx_ring->total_bytes = 0;
3139 q_vector->rx_ring->total_packets = 0;
3140 }
3141 if (q_vector->tx_ring) {
3142 q_vector->tx_ring->total_bytes = 0;
3143 q_vector->tx_ring->total_packets = 0;
3144 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003145}
3146
3147/**
3148 * igb_update_itr - update the dynamic ITR value based on statistics
3149 * Stores a new ITR value based on packets and byte
3150 * counts during the last interrupt. The advantage of per interrupt
3151 * computation is faster updates and more accurate ITR for the current
3152 * traffic pattern. Constants in this function were computed
3153 * based on theoretical maximum wire speed and thresholds were set based
3154 * on testing data as well as attempting to minimize response time
3155 * while increasing bulk throughput.
3156 * this functionality is controlled by the InterruptThrottleRate module
3157 * parameter (see igb_param.c)
3158 * NOTE: These calculations are only valid when operating in a single-
3159 * queue environment.
3160 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003161 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003162 * @packets: the number of packets during this measurement interval
3163 * @bytes: the number of bytes during this measurement interval
3164 **/
3165static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3166 int packets, int bytes)
3167{
3168 unsigned int retval = itr_setting;
3169
3170 if (packets == 0)
3171 goto update_itr_done;
3172
3173 switch (itr_setting) {
3174 case lowest_latency:
3175 /* handle TSO and jumbo frames */
3176 if (bytes/packets > 8000)
3177 retval = bulk_latency;
3178 else if ((packets < 5) && (bytes > 512))
3179 retval = low_latency;
3180 break;
3181 case low_latency: /* 50 usec aka 20000 ints/s */
3182 if (bytes > 10000) {
3183 /* this if handles the TSO accounting */
3184 if (bytes/packets > 8000) {
3185 retval = bulk_latency;
3186 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3187 retval = bulk_latency;
3188 } else if ((packets > 35)) {
3189 retval = lowest_latency;
3190 }
3191 } else if (bytes/packets > 2000) {
3192 retval = bulk_latency;
3193 } else if (packets <= 2 && bytes < 512) {
3194 retval = lowest_latency;
3195 }
3196 break;
3197 case bulk_latency: /* 250 usec aka 4000 ints/s */
3198 if (bytes > 25000) {
3199 if (packets > 35)
3200 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003201 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003202 retval = low_latency;
3203 }
3204 break;
3205 }
3206
3207update_itr_done:
3208 return retval;
3209}
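/* example walk of the table above: at low_latency with 40 packets
 * totalling 20000 bytes, bytes > 10000 and bytes/packets = 500, so no
 * TSO clause fires and packets > 35 drops us to lowest_latency; the same
 * 20000 bytes spread over just 8 packets (2500 bytes each) would instead
 * escalate to bulk_latency */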
3210
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003211static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003212{
Alexander Duyck047e0032009-10-27 15:49:27 +00003213 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003214 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003215 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003216
3217 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3218 if (adapter->link_speed != SPEED_1000) {
3219 current_itr = 0;
3220 new_itr = 4000;
3221 goto set_itr_now;
3222 }
3223
3224 adapter->rx_itr = igb_update_itr(adapter,
3225 adapter->rx_itr,
3226 adapter->rx_ring->total_packets,
3227 adapter->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003228
Alexander Duyck047e0032009-10-27 15:49:27 +00003229 adapter->tx_itr = igb_update_itr(adapter,
3230 adapter->tx_itr,
3231 adapter->tx_ring->total_packets,
3232 adapter->tx_ring->total_bytes);
3233 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003234
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003235 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003236 if (adapter->itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003237 current_itr = low_latency;
3238
Auke Kok9d5c8242008-01-24 02:22:38 -08003239 switch (current_itr) {
3240 /* counts and packets in update_itr are dependent on these numbers */
3241 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003242 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003243 break;
3244 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003245 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003246 break;
3247 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003248 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003249 break;
3250 default:
3251 break;
3252 }
3253
3254set_itr_now:
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003255 adapter->rx_ring->total_bytes = 0;
3256 adapter->rx_ring->total_packets = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003257 adapter->tx_ring->total_bytes = 0;
3258 adapter->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003259
Alexander Duyck047e0032009-10-27 15:49:27 +00003260 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003261 /* this attempts to bias the interrupt rate towards Bulk
3262 * by adding intermediate steps when interrupt rate is
3263 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003264 new_itr = new_itr > q_vector->itr_val ?
3265 max((new_itr * q_vector->itr_val) /
3266 (new_itr + (q_vector->itr_val >> 2)),
3267 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003268 new_itr;
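		/* note: for positive values (new * old) / (new + old/4) is
		 * always below old, so when new_itr > itr_val the max()
		 * above resolves to new_itr and no intermediate step is
		 * taken; the damping term appears to be vestigial */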
3269 /* Don't write the value here; it resets the adapter's
3270 * internal timer, and causes us to delay far longer than
3271 * we should between interrupts. Instead, we write the ITR
3272 * value at the beginning of the next interrupt so the timing
3273 * ends up being correct.
3274 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003275 q_vector->itr_val = new_itr;
3276 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003277 }
3278
3279 return;
3280}
3281
Auke Kok9d5c8242008-01-24 02:22:38 -08003282#define IGB_TX_FLAGS_CSUM 0x00000001
3283#define IGB_TX_FLAGS_VLAN 0x00000002
3284#define IGB_TX_FLAGS_TSO 0x00000004
3285#define IGB_TX_FLAGS_IPV4 0x00000008
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003286#define IGB_TX_FLAGS_TSTAMP 0x00000010
Auke Kok9d5c8242008-01-24 02:22:38 -08003287#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3288#define IGB_TX_FLAGS_VLAN_SHIFT 16
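/* per the masks above, tx_flags packs the flag bits into the low 16 bits
 * and the 802.1Q tag (from vlan_tx_tag_get) into the high 16 bits */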
3289
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003290static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003291 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3292{
3293 struct e1000_adv_tx_context_desc *context_desc;
3294 unsigned int i;
3295 int err;
3296 struct igb_buffer *buffer_info;
3297 u32 info = 0, tu_cmd = 0;
3298 u32 mss_l4len_idx, l4len;
3299 *hdr_len = 0;
3300
3301 if (skb_header_cloned(skb)) {
3302 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3303 if (err)
3304 return err;
3305 }
3306
3307 l4len = tcp_hdrlen(skb);
3308 *hdr_len += l4len;
3309
3310 if (skb->protocol == htons(ETH_P_IP)) {
3311 struct iphdr *iph = ip_hdr(skb);
3312 iph->tot_len = 0;
3313 iph->check = 0;
3314 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3315 iph->daddr, 0,
3316 IPPROTO_TCP,
3317 0);
3318 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3319 ipv6_hdr(skb)->payload_len = 0;
3320 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3321 &ipv6_hdr(skb)->daddr,
3322 0, IPPROTO_TCP, 0);
3323 }
3324
3325 i = tx_ring->next_to_use;
3326
3327 buffer_info = &tx_ring->buffer_info[i];
3328 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3329 /* VLAN MACLEN IPLEN */
3330 if (tx_flags & IGB_TX_FLAGS_VLAN)
3331 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3332 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3333 *hdr_len += skb_network_offset(skb);
3334 info |= skb_network_header_len(skb);
3335 *hdr_len += skb_network_header_len(skb);
3336 context_desc->vlan_macip_lens = cpu_to_le32(info);
3337
3338 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3339 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3340
3341 if (skb->protocol == htons(ETH_P_IP))
3342 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3343 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3344
3345 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3346
3347 /* MSS L4LEN IDX */
3348 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3349 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3350
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003351 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003352 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3353 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003354
3355 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3356 context_desc->seqnum_seed = 0;
3357
3358 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003359 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003360 buffer_info->dma = 0;
3361 i++;
3362 if (i == tx_ring->count)
3363 i = 0;
3364
3365 tx_ring->next_to_use = i;
3366
3367 return true;
3368}
3369
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003370static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3371 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003372{
3373 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck80785292009-10-27 15:51:47 +00003374 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003375 struct igb_buffer *buffer_info;
3376 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003377 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003378
3379 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3380 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3381 i = tx_ring->next_to_use;
3382 buffer_info = &tx_ring->buffer_info[i];
3383 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3384
3385 if (tx_flags & IGB_TX_FLAGS_VLAN)
3386 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3387 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3388 if (skb->ip_summed == CHECKSUM_PARTIAL)
3389 info |= skb_network_header_len(skb);
3390
3391 context_desc->vlan_macip_lens = cpu_to_le32(info);
3392
3393 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3394
3395 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003396 __be16 protocol;
3397
3398 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3399 const struct vlan_ethhdr *vhdr =
3400 (const struct vlan_ethhdr*)skb->data;
3401
3402 protocol = vhdr->h_vlan_encapsulated_proto;
3403 } else {
3404 protocol = skb->protocol;
3405 }
3406
3407 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003408 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003409 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003410 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3411 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003412 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3413 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003414 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003415 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003416 /* XXX what about other V6 headers?? */
3417 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3418 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003419 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3420 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003421 break;
3422 default:
3423 if (unlikely(net_ratelimit()))
Alexander Duyck80785292009-10-27 15:51:47 +00003424 dev_warn(&pdev->dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003425 "partial checksum but proto=%x!\n",
3426 skb->protocol);
3427 break;
3428 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003429 }
3430
3431 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3432 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003433 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003434 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003435 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003436
3437 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003438 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003439 buffer_info->dma = 0;
3440
3441 i++;
3442 if (i == tx_ring->count)
3443 i = 0;
3444 tx_ring->next_to_use = i;
3445
3446 return true;
3447 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003448 return false;
3449}
3450
3451#define IGB_MAX_TXD_PWR 16
3452#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
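/* i.e. 64KB per data descriptor; the BUG_ON checks in igb_tx_map_adv
 * enforce this per-buffer limit */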
3453
Alexander Duyck80785292009-10-27 15:51:47 +00003454static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003455 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003456{
3457 struct igb_buffer *buffer_info;
Alexander Duyck80785292009-10-27 15:51:47 +00003458 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003459 unsigned int len = skb_headlen(skb);
3460 unsigned int count = 0, i;
3461 unsigned int f;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003462 dma_addr_t *map;
Auke Kok9d5c8242008-01-24 02:22:38 -08003463
3464 i = tx_ring->next_to_use;
3465
Alexander Duyck80785292009-10-27 15:51:47 +00003466 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3467 dev_err(&pdev->dev, "TX DMA map failed\n");
Alexander Duyck65689fe2009-03-20 00:17:43 +00003468 return 0;
3469 }
3470
3471 map = skb_shinfo(skb)->dma_maps;
3472
Auke Kok9d5c8242008-01-24 02:22:38 -08003473 buffer_info = &tx_ring->buffer_info[i];
3474 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3475 buffer_info->length = len;
3476 /* set time_stamp *before* dma to help avoid a possible race */
3477 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003478 buffer_info->next_to_watch = i;
Eric Dumazet042a53a2009-06-05 04:04:16 +00003479 buffer_info->dma = skb_shinfo(skb)->dma_head;
Auke Kok9d5c8242008-01-24 02:22:38 -08003480
3481 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3482 struct skb_frag_struct *frag;
3483
Alexander Duyck65689fe2009-03-20 00:17:43 +00003484 i++;
3485 if (i == tx_ring->count)
3486 i = 0;
3487
Auke Kok9d5c8242008-01-24 02:22:38 -08003488 frag = &skb_shinfo(skb)->frags[f];
3489 len = frag->size;
3490
3491 buffer_info = &tx_ring->buffer_info[i];
3492 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3493 buffer_info->length = len;
3494 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003495 buffer_info->next_to_watch = i;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003496 buffer_info->dma = map[count];
Auke Kok9d5c8242008-01-24 02:22:38 -08003497 count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003498 }
3499
Auke Kok9d5c8242008-01-24 02:22:38 -08003500 tx_ring->buffer_info[i].skb = skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003501 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003502
Eric Dumazet042a53a2009-06-05 04:04:16 +00003503 return count + 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003504}
3505
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003506static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003507 int tx_flags, int count, u32 paylen,
3508 u8 hdr_len)
3509{
3510 union e1000_adv_tx_desc *tx_desc = NULL;
3511 struct igb_buffer *buffer_info;
3512 u32 olinfo_status = 0, cmd_type_len;
3513 unsigned int i;
3514
3515 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3516 E1000_ADVTXD_DCMD_DEXT);
3517
3518 if (tx_flags & IGB_TX_FLAGS_VLAN)
3519 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3520
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003521 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3522 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3523
Auke Kok9d5c8242008-01-24 02:22:38 -08003524 if (tx_flags & IGB_TX_FLAGS_TSO) {
3525 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3526
3527 /* insert tcp checksum */
3528 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3529
3530 /* insert ip checksum */
3531 if (tx_flags & IGB_TX_FLAGS_IPV4)
3532 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3533
3534 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3535 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3536 }
3537
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003538 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3539 (tx_flags & (IGB_TX_FLAGS_CSUM |
3540 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003541 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003542 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003543
3544 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3545
3546 i = tx_ring->next_to_use;
3547 while (count--) {
3548 buffer_info = &tx_ring->buffer_info[i];
3549 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3550 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3551 tx_desc->read.cmd_type_len =
3552 cpu_to_le32(cmd_type_len | buffer_info->length);
3553 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3554 i++;
3555 if (i == tx_ring->count)
3556 i = 0;
3557 }
3558
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003559 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08003560 /* Force memory writes to complete before letting h/w
3561 * know there are new descriptors to fetch. (Only
3562 * applicable for weak-ordered memory model archs,
3563 * such as IA-64). */
3564 wmb();
3565
3566 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00003567 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08003568 /* we need this if more than one processor can write to our tail
3569	 * at a time; it synchronizes IO on IA64/Altix systems */
3570 mmiowb();
3571}
3572
Alexander Duycke694e962009-10-27 15:53:06 +00003573static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003574{
Alexander Duycke694e962009-10-27 15:53:06 +00003575 struct net_device *netdev = tx_ring->netdev;
3576
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003577 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003578
Auke Kok9d5c8242008-01-24 02:22:38 -08003579 /* Herbert's original patch had:
3580 * smp_mb__after_netif_stop_queue();
3581 * but since that doesn't exist yet, just open code it. */
3582 smp_mb();
3583
3584	/* We need to check again in case another CPU has just
3585 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00003586 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003587 return -EBUSY;
3588
3589 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003590 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00003591 tx_ring->tx_stats.restart_queue++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003592 return 0;
3593}
3594
Alexander Duycke694e962009-10-27 15:53:06 +00003595static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003596{
Alexander Duyckc493ea42009-03-20 00:16:50 +00003597 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003598 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00003599 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003600}
3601
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003602static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003603 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003604{
Alexander Duycke694e962009-10-27 15:53:06 +00003605 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003606 unsigned int first;
Auke Kok9d5c8242008-01-24 02:22:38 -08003607 unsigned int tx_flags = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003608 u8 hdr_len = 0;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003609 int count = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003610 int tso = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003611 union skb_shared_tx *shtx;
Auke Kok9d5c8242008-01-24 02:22:38 -08003612
Auke Kok9d5c8242008-01-24 02:22:38 -08003613 if (test_bit(__IGB_DOWN, &adapter->state)) {
3614 dev_kfree_skb_any(skb);
3615 return NETDEV_TX_OK;
3616 }
3617
3618 if (skb->len <= 0) {
3619 dev_kfree_skb_any(skb);
3620 return NETDEV_TX_OK;
3621 }
3622
Auke Kok9d5c8242008-01-24 02:22:38 -08003623 /* need: 1 descriptor per page,
3624 * + 2 desc gap to keep tail from touching head,
3625 * + 1 desc for skb->data,
3626 * + 1 desc for context descriptor,
3627 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00003628 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003629 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08003630 return NETDEV_TX_BUSY;
3631 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003632
3633 /*
3634 * TODO: check that there currently is no other packet with
3635 * time stamping in the queue
3636 *
3637 * When doing time stamping, keep the connection to the socket
3638 * a while longer: it is still needed by skb_hwtstamp_tx(),
3639 * called either in igb_tx_hwtstamp() or by our caller when
3640 * doing software time stamping.
3641 */
3642 shtx = skb_tx(skb);
3643 if (unlikely(shtx->hardware)) {
3644 shtx->in_progress = 1;
3645 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003646 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003647
3648 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3649 tx_flags |= IGB_TX_FLAGS_VLAN;
3650 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3651 }
3652
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003653 if (skb->protocol == htons(ETH_P_IP))
3654 tx_flags |= IGB_TX_FLAGS_IPV4;
3655
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003656 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003657 if (skb_is_gso(skb)) {
3658 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3659 if (tso < 0) {
3660 dev_kfree_skb_any(skb);
3661 return NETDEV_TX_OK;
3662 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003663 }
3664
3665 if (tso)
3666 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003667 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00003668 (skb->ip_summed == CHECKSUM_PARTIAL))
3669 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003670
Alexander Duyck65689fe2009-03-20 00:17:43 +00003671 /*
3672	 * count reflects descriptors mapped; if 0 then a mapping error
3673	 * has occurred and we need to rewind the descriptor queue
3674 */
Alexander Duyck80785292009-10-27 15:51:47 +00003675 count = igb_tx_map_adv(tx_ring, skb, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08003676
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003677 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00003678 dev_kfree_skb_any(skb);
3679 tx_ring->buffer_info[first].time_stamp = 0;
3680 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003681 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003682 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003683
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003684 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3685
3686 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00003687 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003688
Auke Kok9d5c8242008-01-24 02:22:38 -08003689 return NETDEV_TX_OK;
3690}
3691
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003692static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3693 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003694{
3695 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003696 struct igb_ring *tx_ring;
3697
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003698 int r_idx = 0;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08003699 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003700 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08003701
3702 /* This goes back to the question of how to logically map a tx queue
3703 * to a flow. Right now, performance is impacted slightly negatively
3704 * if using multiple tx queues. If the stack breaks away from a
3705 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00003706 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003707}
3708
3709/**
3710 * igb_tx_timeout - Respond to a Tx Hang
3711 * @netdev: network interface device structure
3712 **/
3713static void igb_tx_timeout(struct net_device *netdev)
3714{
3715 struct igb_adapter *adapter = netdev_priv(netdev);
3716 struct e1000_hw *hw = &adapter->hw;
3717
3718 /* Do the reset outside of interrupt context */
3719 adapter->tx_timeout_count++;
3720 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00003721 wr32(E1000_EICS,
3722 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08003723}
3724
3725static void igb_reset_task(struct work_struct *work)
3726{
3727 struct igb_adapter *adapter;
3728 adapter = container_of(work, struct igb_adapter, reset_task);
3729
3730 igb_reinit_locked(adapter);
3731}
3732
3733/**
3734 * igb_get_stats - Get System Network Statistics
3735 * @netdev: network interface device structure
3736 *
3737 * Returns the address of the device statistics structure.
3738 * The statistics are actually updated from the timer callback.
3739 **/
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003740static struct net_device_stats *igb_get_stats(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003741{
Auke Kok9d5c8242008-01-24 02:22:38 -08003742 /* only return the current stats */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003743 return &netdev->stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08003744}
3745
3746/**
3747 * igb_change_mtu - Change the Maximum Transfer Unit
3748 * @netdev: network interface device structure
3749 * @new_mtu: new value for maximum frame size
3750 *
3751 * Returns 0 on success, negative on failure
3752 **/
3753static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3754{
3755 struct igb_adapter *adapter = netdev_priv(netdev);
3756 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00003757 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003758
3759 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3760 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3761 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3762 return -EINVAL;
3763 }
3764
Auke Kok9d5c8242008-01-24 02:22:38 -08003765 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3766 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3767 return -EINVAL;
3768 }
3769
3770 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3771 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003772
Auke Kok9d5c8242008-01-24 02:22:38 -08003773 /* igb_down has a dependency on max_frame_size */
3774 adapter->max_frame_size = max_frame;
Auke Kok9d5c8242008-01-24 02:22:38 -08003775 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3776	 * means we reserve 2 more; this pushes us to allocate from the next
3777 * larger slab size.
3778 * i.e. RXBUFFER_2048 --> size-4096 slab
3779 */
3780
Alexander Duyck7d95b712009-10-27 15:50:08 +00003781 if (max_frame <= IGB_RXBUFFER_1024)
Alexander Duyck4c844852009-10-27 15:52:07 +00003782 rx_buffer_len = IGB_RXBUFFER_1024;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003783 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
Alexander Duyck4c844852009-10-27 15:52:07 +00003784 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003785 else
Alexander Duyck4c844852009-10-27 15:52:07 +00003786 rx_buffer_len = IGB_RXBUFFER_128;
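	/* example: the default 1500-byte MTU gives max_frame = 1518, which
	 * selects the 1522-byte VLAN-sized buffer; jumbo MTUs drop to the
	 * 128-byte buffer, which (assuming the rx path, not shown here,
	 * does packet split) only needs to hold the header while the
	 * payload lands in page fragments */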
3787
3788 if (netif_running(netdev))
3789 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003790
3791 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3792 netdev->mtu, new_mtu);
3793 netdev->mtu = new_mtu;
3794
Alexander Duyck4c844852009-10-27 15:52:07 +00003795 for (i = 0; i < adapter->num_rx_queues; i++)
3796 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3797
Auke Kok9d5c8242008-01-24 02:22:38 -08003798 if (netif_running(netdev))
3799 igb_up(adapter);
3800 else
3801 igb_reset(adapter);
3802
3803 clear_bit(__IGB_RESETTING, &adapter->state);
3804
3805 return 0;
3806}
3807
3808/**
3809 * igb_update_stats - Update the board statistics counters
3810 * @adapter: board private structure
3811 **/
3812
3813void igb_update_stats(struct igb_adapter *adapter)
3814{
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003815 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003816 struct e1000_hw *hw = &adapter->hw;
3817 struct pci_dev *pdev = adapter->pdev;
3818 u16 phy_tmp;
3819
3820#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3821
3822 /*
3823 * Prevent stats update while adapter is being reset, or if the pci
3824 * connection is down.
3825 */
3826 if (adapter->link_speed == 0)
3827 return;
3828 if (pci_channel_offline(pdev))
3829 return;
3830
3831 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3832 adapter->stats.gprc += rd32(E1000_GPRC);
3833 adapter->stats.gorc += rd32(E1000_GORCL);
3834 rd32(E1000_GORCH); /* clear GORCL */
3835 adapter->stats.bprc += rd32(E1000_BPRC);
3836 adapter->stats.mprc += rd32(E1000_MPRC);
3837 adapter->stats.roc += rd32(E1000_ROC);
3838
3839 adapter->stats.prc64 += rd32(E1000_PRC64);
3840 adapter->stats.prc127 += rd32(E1000_PRC127);
3841 adapter->stats.prc255 += rd32(E1000_PRC255);
3842 adapter->stats.prc511 += rd32(E1000_PRC511);
3843 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3844 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3845 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3846 adapter->stats.sec += rd32(E1000_SEC);
3847
3848 adapter->stats.mpc += rd32(E1000_MPC);
3849 adapter->stats.scc += rd32(E1000_SCC);
3850 adapter->stats.ecol += rd32(E1000_ECOL);
3851 adapter->stats.mcc += rd32(E1000_MCC);
3852 adapter->stats.latecol += rd32(E1000_LATECOL);
3853 adapter->stats.dc += rd32(E1000_DC);
3854 adapter->stats.rlec += rd32(E1000_RLEC);
3855 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3856 adapter->stats.xontxc += rd32(E1000_XONTXC);
3857 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3858 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3859 adapter->stats.fcruc += rd32(E1000_FCRUC);
3860 adapter->stats.gptc += rd32(E1000_GPTC);
3861 adapter->stats.gotc += rd32(E1000_GOTCL);
3862 rd32(E1000_GOTCH); /* clear GOTCL */
3863 adapter->stats.rnbc += rd32(E1000_RNBC);
3864 adapter->stats.ruc += rd32(E1000_RUC);
3865 adapter->stats.rfc += rd32(E1000_RFC);
3866 adapter->stats.rjc += rd32(E1000_RJC);
3867 adapter->stats.tor += rd32(E1000_TORH);
3868 adapter->stats.tot += rd32(E1000_TOTH);
3869 adapter->stats.tpr += rd32(E1000_TPR);
3870
3871 adapter->stats.ptc64 += rd32(E1000_PTC64);
3872 adapter->stats.ptc127 += rd32(E1000_PTC127);
3873 adapter->stats.ptc255 += rd32(E1000_PTC255);
3874 adapter->stats.ptc511 += rd32(E1000_PTC511);
3875 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3876 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3877
3878 adapter->stats.mptc += rd32(E1000_MPTC);
3879 adapter->stats.bptc += rd32(E1000_BPTC);
3880
3881 /* used for adaptive IFS */
3882
3883 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3884 adapter->stats.tpt += hw->mac.tx_packet_delta;
3885 hw->mac.collision_delta = rd32(E1000_COLC);
3886 adapter->stats.colc += hw->mac.collision_delta;
3887
3888 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3889 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3890 adapter->stats.tncrs += rd32(E1000_TNCRS);
3891 adapter->stats.tsctc += rd32(E1000_TSCTC);
3892 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3893
3894 adapter->stats.iac += rd32(E1000_IAC);
3895 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3896 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3897 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3898 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3899 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3900 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3901 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3902 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3903
3904 /* Fill out the OS statistics structure */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003905 netdev->stats.multicast = adapter->stats.mprc;
3906 netdev->stats.collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003907
3908 /* Rx Errors */
3909
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00003910 if (hw->mac.type != e1000_82575) {
3911 u32 rqdpc_tmp;
Jesper Dangaard Brouer3ea73af2009-05-26 13:50:48 +00003912 u64 rqdpc_total = 0;
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00003913 int i;
3914		/* Read out drop stats per RX queue. Note that RQDPC (Receive
3915		 * Queue Drop Packet Count) only gets incremented if the
3916		 * DROP_EN bit is set (in the SRRCTL register for that
3917		 * queue). If the DROP_EN bit is NOT set, then a somewhat
3918		 * equivalent count is stored in RNBC (not on a per-queue basis).
3919		 * Also note the drop count is due to lack of available
3920		 * descriptors.
3921 */
3922 for (i = 0; i < adapter->num_rx_queues; i++) {
3923 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3924 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
Jesper Dangaard Brouer3ea73af2009-05-26 13:50:48 +00003925 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00003926 }
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003927 netdev->stats.rx_fifo_errors = rqdpc_total;
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00003928 }
3929
Jesper Dangaard Brouer3ea73af2009-05-26 13:50:48 +00003930	/* Note RNBC (Receive No Buffers Count) is not an exact
3931	 * drop count, as the hardware FIFO might save the day. That's
3932	 * one of the reasons for folding it into rx_fifo_errors, as it is
3933	 * potentially not a true drop.
3934 */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003935 netdev->stats.rx_fifo_errors += adapter->stats.rnbc;
Jesper Dangaard Brouer3ea73af2009-05-26 13:50:48 +00003936
Auke Kok9d5c8242008-01-24 02:22:38 -08003937 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00003938 * our own version based on RUC and ROC */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003939 netdev->stats.rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08003940 adapter->stats.crcerrs + adapter->stats.algnerrc +
3941 adapter->stats.ruc + adapter->stats.roc +
3942 adapter->stats.cexterr;
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003943 netdev->stats.rx_length_errors = adapter->stats.ruc +
Auke Kok9d5c8242008-01-24 02:22:38 -08003944 adapter->stats.roc;
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003945 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3946 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3947 netdev->stats.rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003948
3949 /* Tx Errors */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003950 netdev->stats.tx_errors = adapter->stats.ecol +
Auke Kok9d5c8242008-01-24 02:22:38 -08003951 adapter->stats.latecol;
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003952 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3953 netdev->stats.tx_window_errors = adapter->stats.latecol;
3954 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08003955
3956 /* Tx Dropped needs to be maintained elsewhere */
3957
3958 /* Phy Stats */
3959 if (hw->phy.media_type == e1000_media_type_copper) {
3960 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003961 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003962 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3963 adapter->phy_stats.idle_errors += phy_tmp;
3964 }
3965 }
3966
3967 /* Management Stats */
3968 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3969 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3970 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3971}
3972
Auke Kok9d5c8242008-01-24 02:22:38 -08003973static irqreturn_t igb_msix_other(int irq, void *data)
3974{
Alexander Duyck047e0032009-10-27 15:49:27 +00003975 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08003976 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003977 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003978 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00003979
Alexander Duyck047e0032009-10-27 15:49:27 +00003980 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00003981 /* HW is reporting DMA is out of sync */
3982 adapter->stats.doosync++;
3983 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00003984
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003985 /* Check for a mailbox event */
3986 if (icr & E1000_ICR_VMMB)
3987 igb_msg_task(adapter);
3988
3989 if (icr & E1000_ICR_LSC) {
3990 hw->mac.get_link_status = 1;
3991 /* guard against interrupt when we're going down */
3992 if (!test_bit(__IGB_DOWN, &adapter->state))
3993 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3994 }
3995
3996 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003997 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08003998
3999 return IRQ_HANDLED;
4000}
4001
Alexander Duyck047e0032009-10-27 15:49:27 +00004002static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004003{
Alexander Duyck047e0032009-10-27 15:49:27 +00004004 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004005
Alexander Duyck047e0032009-10-27 15:49:27 +00004006 if (!q_vector->set_itr)
4007 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004008
Alexander Duyck047e0032009-10-27 15:49:27 +00004009 if (!itr_val)
4010 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004011
Alexander Duyck047e0032009-10-27 15:49:27 +00004012 if (q_vector->itr_shift)
4013 itr_val |= itr_val << q_vector->itr_shift;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004014 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004015 itr_val |= 0x8000000;
4016
4017 writel(itr_val, q_vector->itr_register);
4018 q_vector->set_itr = 0;
4019}
4020
4021static irqreturn_t igb_msix_ring(int irq, void *data)
4022{
4023 struct igb_q_vector *q_vector = data;
4024
4025 /* Write the ITR value calculated from the previous interrupt. */
4026 igb_write_itr(q_vector);
4027
4028 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004029
Auke Kok9d5c8242008-01-24 02:22:38 -08004030 return IRQ_HANDLED;
4031}
4032
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

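/*
 * SR-IOV mailbox plumbing: the PF exchanges short messages with each VF
 * through a per-VF hardware mailbox. The helpers below implement the PF
 * side of that protocol.
 */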
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].clear_to_send)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* VFs are limited to using the MTA hash table for their multicast
	 * addresses */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		vf_data = &adapter->vf_data[i];
		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
	}
}

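/*
 * The VLVF is a small array of VLAN filter entries shared by all pools;
 * each entry pairs a VLAN ID with a pool-select bitmap. When the last
 * pool bit in an entry is cleared, the VLAN ID must also be dropped from
 * the VFTA hash filter table.
 */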
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN ID before clearing the entry,
			 * otherwise we would always remove vid 0 */
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* It is an error to call this function when VFs are not enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table, if bit already set
				 * PF must have added it outside of table */
				if (igb_vfta_set(hw, vid, true))
					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
						adapter->vfs_allocated_count);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;

			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

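			/* the first VLAN enabled on this pool grows the VF's
			 * max receive packet size (RLPML) by 4 bytes to make
			 * room for the 802.1Q tag; the last one removed
			 * shrinks it again in the branch below */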
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			adapter->vf_data[vf].vlans_enabled++;

			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	/* disable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = false;

	/* reset offloads to defaults */
	igb_set_vmolr(hw, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

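/*
 * A VF signals a reset by writing E1000_VF_RESET to its mailbox; the PF
 * clears the VF's state, re-programs its MAC filter and re-enables its
 * TX/RX queues, then answers with an ACK that carries the MAC address.
 */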
static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	/* enable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vf_data[vf].clear_to_send)
		igb_write_mbx(hw, &msg, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf)) {
			adapter->vf_data[vf].clear_to_send = false;
			igb_vf_reset_event(adapter, vf);
		}

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

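/*
 * Mailbox message layout: msgbuf[0] carries the message type in its low
 * 16 bits plus E1000_VT_MSGINFO data in the upper bits; any payload
 * (MAC address, VLAN ID, hash list) follows from msgbuf[1] on.
 */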
static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 mbx_size = E1000_VFMAILBOX_SIZE;
	u32 msgbuf[mbx_size];
	struct e1000_hw *hw = &adapter->hw;
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval)
		dev_err(&adapter->pdev->dev,
		        "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return retval;
	}

	if (!adapter->vf_data[vf].clear_to_send) {
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
		igb_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used,
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

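/*
 * In MSI and legacy interrupt modes the whole device shares a single
 * vector, so everything funnels through q_vector[0]; reading ICR also
 * auto-masks further interrupts until they are explicitly re-enabled.
 */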
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->itr_setting & 3) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

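/*
 * NAPI contract: igb_poll() may consume at most @budget RX packets; if it
 * returns a value below the budget it must call napi_complete() and
 * re-enable its interrupt, otherwise the core keeps polling it.
 */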
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(shtx->hardware)) {
		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
		if (valid) {
			u64 regval = rd32(E1000_TXSTMPL);
			u64 ns;
			struct skb_shared_hwtstamps shhwtstamps;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock,
			                          regval);
			timecompare_update(&adapter->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
			        timecompare_transform(&adapter->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}

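/*
 * TX cleanup walks the ring from next_to_clean, using each buffer's
 * next_to_watch as the end-of-packet descriptor to test for DD (done)
 * before unmapping and freeing the whole chain of buffers.
 */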
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(adapter, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}

/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

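/*
 * RX checksum offload: the hardware validates IP/TCP/UDP checksums and
 * reports the result in status_err; CHECKSUM_UNNECESSARY tells the stack
 * to skip its own verification, CHECKSUM_NONE makes it recheck.
 */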
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if the
	 * parsed header (an NFS header, for example) claims to be larger.
	 * In that case, it fills the header buffer and spills the rest into
	 * the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}

static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 rx_ring->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;

			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		/*
		 * If this bit is set, then the RX registers contain
		 * the time stamp. No other packet will be time
		 * stamped until we read these registers, so read the
		 * registers to make them available again. Because
		 * only one packet can be time stamped at a time, we
		 * know that the register values must belong to this
		 * one here and therefore we don't need to compare
		 * any of the additional attributes stored for it.
		 *
		 * If nothing went wrong, then it should have a
		 * skb_shared_tx that we can turn into a
		 * skb_shared_hwtstamps.
		 *
		 * TODO: can time stamping be triggered (thus locking
		 * the registers) without the packet reaching this point
		 * here? In that case RX time stamping would get stuck.
		 *
		 * TODO: in "time stamp all packets" mode this bit is
		 * not set. Need a global flag for this mode and then
		 * always read the registers. Cannot be done without
		 * a race condition.
		 */
		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
			u64 regval;
			u64 ns;
			struct skb_shared_hwtstamps *shhwtstamps =
			        skb_hwtstamps(skb);

			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
			     "igb: no RX time stamp available for time stamped packet");
			regval = rd32(E1000_RXSTMPL);
			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock, regval);
			timecompare_update(&adapter->compare, ns);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
			shhwtstamps->syststamp =
			        timecompare_transform(&adapter->compare, ns);
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}

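/*
 * Packet-split receive: each descriptor gets a small header buffer
 * (skb->data) plus half a page for payload. page_offset is toggled by
 * PAGE_SIZE / 2 so both halves of a page are handed out before the page
 * is released, and the page is reused while its refcount stays at 1.
 */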
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: ring to allocate buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                                     int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(rx_ring->pdev,
			                                  skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - read/write PHY registers via MII ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to user-supplied ifreq holding MII data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

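/*
 * For illustration only, a minimal userspace sketch of how this ioctl is
 * reached (the interface name and filter choice here are arbitrary):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */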
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: pointer to user-supplied ifreq holding a hwtstamp_config
 * @cmd: SIOCSHWTSTAMP
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_ctl_type = 0;
	u32 tsync_rx_cfg = 0;
	int is_l4 = 0;
	int is_l2 = 0;
	short port = 319; /* PTP */
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl_bit = 0;
		break;
	case HWTSTAMP_TX_ON:
		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl_bit = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = 1;
		break;
	default:
		return -ERANGE;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX, define which PTP packets are time stamped */
	regval = rd32(E1000_TSYNCRXCTL);
	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
	regval = (regval & ~0xE) | tsync_rx_ctl_type;
	wr32(E1000_TSYNCRXCTL, regval);
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/*
	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
	 * (Ethertype to filter on)
	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
	 */
	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);

	/* L4 Queue Filter[0]: only filter by source and destination port */
	wr32(E1000_SPQF0, htons(port));
	wr32(E1000_IMIREXT(0), is_l4 ?
	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
	wr32(E1000_IMIR(0), is_l4 ?
	     (htons(port)
	      | (0<<16) /* immediate interrupt disabled */
	      | 0 /* (1<<17) bit cleared: do not bypass
		     destination port check */)
	     : 0);
	wr32(E1000_FTQF0, is_l4 ?
	     (0x11 /* UDP */
	      | (1<<15) /* VF not compared */
	      | (1<<27) /* Enable Timestamping */
	      | (7<<28) /* only source port filter enabled,
			   source/target address and protocol
			   masked */)
	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
				      enabled */));

	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * igb_ioctl - entry point for device ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to user-supplied ifreq
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

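/*
 * VLAN offload: registering a vlan_group flips CTRL.VME so the MAC
 * inserts/strips 802.1Q tags in hardware; per-VID filtering then goes
 * through the shared VLVF/VFTA tables handled above.
 */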
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove vid from vlvf if sr-iov is enabled,
	 * if not in vlvf remove from vfta */
	if (igb_vlvf_set(adapter, vid, false, pf_id))
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

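/* spddplx encodes the requested link as SPEED_xxx + DUPLEX_xxx, matching
 * the sums tested in the switch below; 1000 Mb/s is autoneg-only. */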
5453int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5454{
5455 struct e1000_mac_info *mac = &adapter->hw.mac;
5456
5457 mac->autoneg = 0;
5458
Auke Kok9d5c8242008-01-24 02:22:38 -08005459 switch (spddplx) {
5460 case SPEED_10 + DUPLEX_HALF:
5461 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5462 break;
5463 case SPEED_10 + DUPLEX_FULL:
5464 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5465 break;
5466 case SPEED_100 + DUPLEX_HALF:
5467 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5468 break;
5469 case SPEED_100 + DUPLEX_FULL:
5470 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5471 break;
5472 case SPEED_1000 + DUPLEX_FULL:
5473 mac->autoneg = 1;
5474 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5475 break;
5476 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5477 default:
5478 dev_err(&adapter->pdev->dev,
5479 "Unsupported Speed/Duplex configuration\n");
5480 return -EINVAL;
5481 }
5482 return 0;
5483}
5484
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
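/**
 * igb_suspend - PCI suspend entry point
 * @pdev: Pointer to PCI device
 * @state: target power state (unused here)
 *
 * Performs the common shutdown and then either arms the device for
 * wakeup or puts it straight into D3hot with wake disabled.
 */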
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

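/**
 * igb_resume - PCI resume entry point
 * @pdev: Pointer to PCI device
 *
 * Restores PCI state, rebuilds the interrupt scheme, resets the
 * hardware, and reopens the interface if it was running at suspend.
 */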
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

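/**
 * igb_shutdown - PCI shutdown entry point
 * @pdev: Pointer to PCI device
 *
 * On system power-off, leaves the device in D3hot with wake enabled
 * only if the common shutdown path determined wakeup was requested.
 */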
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

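/**
 * igb_rar_set_qsel - program a receive address register with pool select
 * @adapter: board private structure
 * @addr: 6-byte MAC address, in network (big endian) byte order
 * @index: receive address register (RAL/RAH) entry to program
 * @qsel: pool the address should be associated with
 *
 * Writes the address in little endian form and marks the entry valid.
 * The 82575 encodes the pool field differently from later parts, hence
 * the multiply vs. shift below.
 */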
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

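/**
 * igb_set_vf_mac - program the MAC address used by a virtual function
 * @adapter: board private structure
 * @vf: VF number the address belongs to
 * @mac_addr: 6-byte MAC address to assign
 */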
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first; as a result a collision should not be
	 * possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

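/**
 * igb_vmm_control - enable or disable VMDq loopback and replication
 * @adapter: board private structure
 *
 * Turns packet loopback and replication on when VFs are allocated and
 * off otherwise; the 82575 does not support replication, so it is
 * skipped entirely.
 */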
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */