/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
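
/*
 * Usage example (illustrative, not from the original source): loading the
 * driver with
 *
 *        modprobe igb max_vfs=7
 *
 * requests seven SR-IOV virtual functions per port; the remaining queue
 * pair stays with the physical function (see igb_cache_ring_register()
 * below for how the PF queues are interleaved around the VF queues).
 */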

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
        u64 stamp = 0;
        int shift = 0;

        /*
         * The timestamp latches on lowest register read. For the 82580
         * the lowest register is SYSTIMR instead of SYSTIML.  However we never
         * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
         */
        if (hw->mac.type == e1000_82580) {
                stamp = rd32(E1000_SYSTIMR) >> 8;
                shift = IGB_82580_TSYNC_SHIFT;
        }

        stamp |= (u64)rd32(E1000_SYSTIML) << shift;
        stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
        return stamp;
}
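
/*
 * Illustrative summary (derived from the code above, not in the original
 * source): on the 82580 the returned 64-bit stamp is
 *
 *        (SYSTIMH << (shift + 32)) | (SYSTIML << shift) | (SYSTIMR >> 8)
 *
 * with shift = IGB_82580_TSYNC_SHIFT; on 82575/82576 the shift is 0 and
 * the stamp is simply SYSTIMH:SYSTIML.
 */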

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
        cycle_t hw = adapter->cycles.read(&adapter->cycles);
        struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
        struct timespec sys;
        struct timespec delta;
        getnstimeofday(&sys);

        delta = timespec_sub(nic, sys);

        sprintf(buffer,
                "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
                hw,
                (long)nic.tv_sec, nic.tv_nsec,
                (long)sys.tv_sec, sys.tv_nsec,
                (long)delta.tv_sec, delta.tv_nsec);

        return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i = 0, j = 0;
        u32 rbase_offset = adapter->vfs_allocated_count;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence
                 */
                if (adapter->vfs_allocated_count) {
                        for (; i < adapter->rss_queues; i++)
                                adapter->rx_ring[i]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(i);
                        for (; j < adapter->rss_queues; j++)
                                adapter->tx_ring[j]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(j);
                }
        case e1000_82575:
        case e1000_82580:
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
                for (; j < adapter->num_tx_queues; j++)
                        adapter->tx_ring[j]->reg_idx = rbase_offset + j;
                break;
        }
}
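
/*
 * Worked example (illustrative): Q_IDX_82576() moves the low bit of the
 * queue index into bit 3, so
 *
 *        Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1, ...
 *
 * With the 82576 maximum of 7 VFs, rbase_offset is 7 and a PF using two
 * RSS queues lands on register indices 7 and 15, the two slots left free
 * by the VF queue pairs (0/8, 1/9, ..., 6/14).
 */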

static void igb_free_queues(struct igb_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                kfree(adapter->tx_ring[i]);
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                kfree(adapter->rx_ring[i]);
                adapter->rx_ring[i] = NULL;
        }
        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        struct igb_ring *ring;
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
                adapter->tx_ring[i] = ring;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
                adapter->rx_ring[i] = ring;
        }

        igb_cache_ring_register(adapter);

        return 0;

err:
        igb_free_queues(adapter);

        return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
        u32 msixbm = 0;
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
        int rx_queue = IGB_N0_QUEUE;
        int tx_queue = IGB_N0_QUEUE;

        if (q_vector->rx_ring)
                rx_queue = q_vector->rx_ring->reg_idx;
        if (q_vector->tx_ring)
                tx_queue = q_vector->tx_ring->reg_idx;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                if (!adapter->msix_entries && msix_vector == 0)
                        msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        } else {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        } else {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        case e1000_82580:
                /* 82580 uses the same table-based approach as 82576 but has
                   fewer entries; as a result we carry over for queues greater
                   than 4. */
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue & 0x1) {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        } else {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue & 0x1) {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        } else {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }

        /* add q_vector eims value to global eims_enable_mask */
        adapter->eims_enable_mask |= q_vector->eims_value;

        /* configure q_vector to set itr on first interrupt */
        q_vector->set_itr = 1;
}

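/*
 * Illustrative IVAR layout for the 82576 (derived from the code above):
 * each 32-bit IVAR0[index] register packs four table entries, one per byte:
 *
 *        byte 0: rx queue (index)        byte 1: tx queue (index)
 *        byte 2: rx queue (index + 8)    byte 3: tx queue (index + 8)
 *
 * For example, mapping rx queue 9 to MSI-X vector 3 reads IVAR0[1] and
 * writes (3 | E1000_IVAR_VALID) into byte 2.
 */
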
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support */
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);

                /* enable msix_other interrupt */
                array_wr32(E1000_MSIXBM(0), vector++,
                           E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
        case e1000_82580:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
                                 E1000_GPIE_NSICR);

                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;

                wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */

        adapter->eims_enable_mask |= adapter->eims_other;

        for (i = 0; i < adapter->num_q_vectors; i++)
                igb_assign_vector(adapter->q_vector[i], vector++);

        wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;

        err = request_irq(adapter->msix_entries[vector].vector,
                          igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto out;
        vector++;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];

                q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

                if (q_vector->rx_ring && q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else if (q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                                q_vector->tx_ring->queue_index);
                else if (q_vector->rx_ring)
                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else
                        sprintf(q_vector->name, "%s-unused", netdev->name);

                err = request_irq(adapter->msix_entries[vector].vector,
                                  igb_msix_ring, 0, q_vector->name,
                                  q_vector);
                if (err)
                        goto out;
                vector++;
        }

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
        }
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
                if (!q_vector)
                        continue;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
        adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
        igb_free_queues(adapter);
        igb_free_q_vectors(adapter);
        igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        /* Number of supported queues. */
        adapter->num_rx_queues = adapter->rss_queues;
        adapter->num_tx_queues = adapter->rss_queues;

        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;

        /* if tx handler is separate add 1 for every tx queue */
        if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
                numvecs += adapter->num_tx_queues;

        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;

        /* add 1 vector for link status interrupts */
        numvecs++;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
                struct e1000_hw *hw = &adapter->hw;
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(adapter->pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
#endif
        adapter->vfs_allocated_count = 0;
        adapter->rss_queues = 1;
        adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}

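/*
 * Example vector budget (illustrative): with rss_queues = 4 and
 * IGB_FLAG_QUEUE_PAIRS clear, numvecs is 4 rx + 4 tx + 1 link = 9 MSI-X
 * vectors.  With queue pairing set, each q_vector services one rx/tx pair
 * and the request shrinks to 4 + 1 = 5 vectors.
 */
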
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;

err_out:
        igb_free_q_vectors(adapter);
        return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

        q_vector->rx_ring = adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->rx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

        q_vector->tx_ring = adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->tx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
        int i;
        int v_idx = 0;

        if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
            (adapter->num_q_vectors < adapter->num_tx_queues))
                return -ENOMEM;

        if (adapter->num_q_vectors >=
            (adapter->num_rx_queues + adapter->num_tx_queues)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        } else {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        if (i < adapter->num_tx_queues)
                                igb_map_tx_ring_to_vector(adapter, i, v_idx);
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                }
                for (; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        }
        return 0;
}
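
/*
 * Mapping example (illustrative): with 4 rx and 4 tx queues and 8
 * q_vectors, every ring gets a private vector (rx0-rx3 on vectors 0-3,
 * tx0-tx3 on vectors 4-7).  With only 4 q_vectors, the else branch pairs
 * the rings so that vector i services both tx ring i and rx ring i.
 */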

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;

        igb_set_interrupt_capability(adapter);

        err = igb_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
                goto err_alloc_q_vectors;
        }

        err = igb_alloc_queues(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        err = igb_map_ring_to_vector(adapter);
        if (err) {
                dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
                goto err_map_queues;
        }


        return 0;
err_map_queues:
        igb_free_queues(adapter);
err_alloc_queues:
        igb_free_q_vectors(adapter);
err_alloc_q_vectors:
        igb_reset_interrupt_capability(adapter);
        return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                adapter->num_q_vectors = 1;
                err = igb_alloc_q_vectors(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for vectors\n");
                        goto request_done;
                }
                err = igb_alloc_queues(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for queues\n");
                        igb_free_q_vectors(adapter);
                        goto request_done;
                }
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
                igb_assign_vector(adapter->q_vector[0], 0);
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
                                  netdev->name, adapter);
                if (!err)
                        goto request_done;

                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
                          netdev->name, adapter);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}

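/*
 * Interrupt fallback order (summary of the logic above): MSI-X with one
 * vector per q_vector plus one for link and other causes; failing that, a
 * single MSI vector driving one q_vector; and finally a shared legacy
 * INTx line.
 */
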
static void igb_free_irq(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                int vector = 0, i;

                free_irq(adapter->msix_entries[vector++].vector, adapter);

                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct igb_q_vector *q_vector = adapter->q_vector[i];
                        free_irq(adapter->msix_entries[vector++].vector,
                                 q_vector);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /*
         * we need to be careful when disabling interrupts.  The VFs are also
         * mapped into these registers and so clearing the bits can cause
         * issues on the VF drivers so we only need to clear what we set
         */
        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
                u32 regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                if (adapter->vfs_allocated_count) {
                        wr32(E1000_MBVFIMR, 0xFF);
                        ims |= E1000_IMS_VMMB;
                }
                if (adapter->hw.mac.type == e1000_82580)
                        ims |= E1000_IMS_DRSTA;

                wr32(E1000_IMS, ims);
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
                wr32(E1000_IAM, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
        }
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                /* add VID to filter table */
                igb_vfta_set(hw, vid, true);
                adapter->mng_vlan_id = vid;
        } else {
                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        }

        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
            (vid != old_vid) &&
            !vlan_group_get_device(adapter->vlgrp, old_vid)) {
                /* remove VID from filter table */
                igb_vfta_set(hw, old_vid, false);
        }
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_rx_mode(netdev);

        igb_restore_vlan(adapter);

        igb_setup_tctl(adapter);
        igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);

        igb_configure_tx(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
        if (adapter->hw.phy.media_type == e1000_media_type_copper)
                igb_power_up_phy_copper(&adapter->hw);
        else
                igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
        if (adapter->hw.phy.media_type == e1000_media_type_copper)
                igb_power_down_phy_copper_82575(&adapter->hw);
        else
                igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
        else
                igb_assign_vector(adapter->q_vector[0], 0);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(adapter->netdev);

        /* start the watchdog. */
        hw->mac.get_link_status = 1;
        schedule_work(&adapter->watchdog_task);

        return 0;
}

void igb_down(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_disable(&q_vector->napi);
        }

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netif_carrier_off(netdev);

        /* record the stats before reset */
        igb_update_stats(adapter);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

        /* since we reset the hardware DCA settings were cleared */
        igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}

1242void igb_reset(struct igb_adapter *adapter)
1243{
Alexander Duyck090b1792009-10-27 23:51:55 +00001244 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001245 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001246 struct e1000_mac_info *mac = &hw->mac;
1247 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001248 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1249 u16 hwm;
1250
1251 /* Repartition Pba for greater than 9k mtu
1252 * To take effect CTRL.RST is required.
1253 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001254 switch (mac->type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00001255 case e1000_82580:
1256 pba = rd32(E1000_RXPBS);
1257 pba = igb_rxpbs_adjust_82580(pba);
1258 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001259 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001260 pba = rd32(E1000_RXPBS);
1261 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001262 break;
1263 case e1000_82575:
1264 default:
1265 pba = E1000_PBA_34K;
1266 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001267 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001268
Alexander Duyck2d064c02008-07-08 15:10:12 -07001269 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1270 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001271 /* adjust PBA for jumbo frames */
1272 wr32(E1000_PBA, pba);
1273
1274 /* To maintain wire speed transmits, the Tx FIFO should be
1275 * large enough to accommodate two full transmit packets,
1276 * rounded up to the next 1KB and expressed in KB. Likewise,
1277 * the Rx FIFO should be large enough to accommodate at least
1278 * one full receive packet and is similarly rounded up and
1279 * expressed in KB. */
1280 pba = rd32(E1000_PBA);
1281 /* upper 16 bits has Tx packet buffer allocation size in KB */
1282 tx_space = pba >> 16;
1283 /* lower 16 bits has Rx packet buffer allocation size in KB */
1284 pba &= 0xffff;
1285 /* the Tx FIFO also stores 16 bytes of information about the Tx packet,
1286 * but doesn't include the Ethernet FCS because hardware appends it */
1287 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001288 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001289 ETH_FCS_LEN) * 2;
1290 min_tx_space = ALIGN(min_tx_space, 1024);
1291 min_tx_space >>= 10;
1292 /* software strips receive CRC, so leave room for it */
1293 min_rx_space = adapter->max_frame_size;
1294 min_rx_space = ALIGN(min_rx_space, 1024);
1295 min_rx_space >>= 10;
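/* For illustration (assumed configuration, not taken from the code above):
 * with a 9000-byte MTU on an 82575, max_frame_size is 9018, so
 * min_tx_space = (9018 + 16 - 4) * 2 = 18060 bytes, where 16 is
 * sizeof(union e1000_adv_tx_desc); ALIGN() rounds that up to 18432 and
 * the shift yields 18 KB. min_rx_space is 9018 -> 9216 -> 9 KB.
 */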
1296
1297 /* If current Tx allocation is less than the min Tx FIFO size,
1298 * and the min Tx FIFO size is less than the current Rx FIFO
1299 * allocation, take space away from current Rx allocation */
1300 if (tx_space < min_tx_space &&
1301 ((min_tx_space - tx_space) < pba)) {
1302 pba = pba - (min_tx_space - tx_space);
1303
1304 /* if short on rx space, rx wins and must trump tx
1305 * adjustment */
1306 if (pba < min_rx_space)
1307 pba = min_rx_space;
1308 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001309 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001310 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001311
1312 /* flow control settings */
1313 /* The high water mark must be low enough to fit one full frame
1314 * (or the size used for early receive) above it in the Rx FIFO.
1315 * Set it to the lower of:
1316 * - 90% of the Rx FIFO size, or
1317 * - the full Rx FIFO size minus one full frame */
1318 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001319 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001320
Alexander Duyckd405ea32009-12-23 13:21:27 +00001321 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1322 fc->low_water = fc->high_water - 16;
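/* Worked example under assumed values: with pba = 34 (E1000_PBA_34K) and a
 * 1518-byte max frame, pba << 10 = 34816 bytes; 90% of that is 31334,
 * while 34816 - 2 * 1518 = 31780, so hwm = 31334. Masking to 16-byte
 * granularity gives high_water = 31328 and low_water = 31312.
 */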
Auke Kok9d5c8242008-01-24 02:22:38 -08001323 fc->pause_time = 0xFFFF;
1324 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001325 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001326
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001327 /* disable receive for all VFs and wait one second */
1328 if (adapter->vfs_allocated_count) {
1329 int i;
1330 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001331 adapter->vf_data[i].flags = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001332
1333 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001334 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001335
1336 /* disable transmits and receives */
1337 wr32(E1000_VFRE, 0);
1338 wr32(E1000_VFTE, 0);
1339 }
1340
Auke Kok9d5c8242008-01-24 02:22:38 -08001341 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001342 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001343 wr32(E1000_WUC, 0);
1344
Alexander Duyck330a6d62009-10-27 23:51:35 +00001345 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001346 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001347
Alexander Duyck55cac242009-11-19 12:42:21 +00001348 if (hw->mac.type == e1000_82580) {
1349 u32 reg = rd32(E1000_PCIEMISC);
1350 wr32(E1000_PCIEMISC,
1351 reg & ~E1000_PCIEMISC_LX_DECISION);
1352 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001353 if (!netif_running(adapter->netdev))
1354 igb_power_down_link(adapter);
1355
Auke Kok9d5c8242008-01-24 02:22:38 -08001356 igb_update_mng_vlan(adapter);
1357
1358 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1359 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1360
Alexander Duyck330a6d62009-10-27 23:51:35 +00001361 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001362}
1363
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001364static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001365 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001366 .ndo_stop = igb_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08001367 .ndo_start_xmit = igb_xmit_frame_adv,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001368 .ndo_get_stats = igb_get_stats,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001369 .ndo_set_rx_mode = igb_set_rx_mode,
1370 .ndo_set_multicast_list = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001371 .ndo_set_mac_address = igb_set_mac,
1372 .ndo_change_mtu = igb_change_mtu,
1373 .ndo_do_ioctl = igb_ioctl,
1374 .ndo_tx_timeout = igb_tx_timeout,
1375 .ndo_validate_addr = eth_validate_addr,
1376 .ndo_vlan_rx_register = igb_vlan_rx_register,
1377 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1378 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001379 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1380 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1381 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1382 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001383#ifdef CONFIG_NET_POLL_CONTROLLER
1384 .ndo_poll_controller = igb_netpoll,
1385#endif
1386};
1387
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001388/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001389 * igb_probe - Device Initialization Routine
1390 * @pdev: PCI device information struct
1391 * @ent: entry in igb_pci_tbl
1392 *
1393 * Returns 0 on success, negative on failure
1394 *
1395 * igb_probe initializes an adapter identified by a pci_dev structure.
1396 * The OS initialization, configuring of the adapter private structure,
1397 * and a hardware reset occur.
1398 **/
1399static int __devinit igb_probe(struct pci_dev *pdev,
1400 const struct pci_device_id *ent)
1401{
1402 struct net_device *netdev;
1403 struct igb_adapter *adapter;
1404 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001405 u16 eeprom_data = 0;
1406 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001407 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1408 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001409 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001410 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1411 u32 part_num;
1412
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001413 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001414 if (err)
1415 return err;
1416
1417 pci_using_dac = 0;
Yang Hongyang6a355282009-04-06 19:01:13 -07001418 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001419 if (!err) {
Yang Hongyang6a355282009-04-06 19:01:13 -07001420 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001421 if (!err)
1422 pci_using_dac = 1;
1423 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07001424 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001425 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07001426 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001427 if (err) {
1428 dev_err(&pdev->dev, "No usable DMA "
1429 "configuration, aborting\n");
1430 goto err_dma;
1431 }
1432 }
1433 }
1434
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001435 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1436 IORESOURCE_MEM),
1437 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001438 if (err)
1439 goto err_pci_reg;
1440
Frans Pop19d5afd2009-10-02 10:04:12 -07001441 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001442
Auke Kok9d5c8242008-01-24 02:22:38 -08001443 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001444 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001445
1446 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001447 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1448 IGB_ABS_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001449 if (!netdev)
1450 goto err_alloc_etherdev;
1451
1452 SET_NETDEV_DEV(netdev, &pdev->dev);
1453
1454 pci_set_drvdata(pdev, netdev);
1455 adapter = netdev_priv(netdev);
1456 adapter->netdev = netdev;
1457 adapter->pdev = pdev;
1458 hw = &adapter->hw;
1459 hw->back = adapter;
1460 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1461
1462 mmio_start = pci_resource_start(pdev, 0);
1463 mmio_len = pci_resource_len(pdev, 0);
1464
1465 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001466 hw->hw_addr = ioremap(mmio_start, mmio_len);
1467 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001468 goto err_ioremap;
1469
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001470 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001471 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001472 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001473
1474 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1475
1476 netdev->mem_start = mmio_start;
1477 netdev->mem_end = mmio_start + mmio_len;
1478
Auke Kok9d5c8242008-01-24 02:22:38 -08001479 /* PCI config space info */
1480 hw->vendor_id = pdev->vendor;
1481 hw->device_id = pdev->device;
1482 hw->revision_id = pdev->revision;
1483 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1484 hw->subsystem_device_id = pdev->subsystem_device;
1485
Auke Kok9d5c8242008-01-24 02:22:38 -08001486 /* Copy the default MAC, PHY and NVM function pointers */
1487 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1488 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1489 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1490 /* Initialize skew-specific constants */
1491 err = ei->get_invariants(hw);
1492 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001493 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001494
Alexander Duyck450c87c2009-02-06 23:22:11 +00001495 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001496 err = igb_sw_init(adapter);
1497 if (err)
1498 goto err_sw_init;
1499
1500 igb_get_bus_info_pcie(hw);
1501
1502 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001503
1504 /* Copper options */
1505 if (hw->phy.media_type == e1000_media_type_copper) {
1506 hw->phy.mdix = AUTO_ALL_MODES;
1507 hw->phy.disable_polarity_correction = false;
1508 hw->phy.ms_type = e1000_ms_hw_default;
1509 }
1510
1511 if (igb_check_reset_block(hw))
1512 dev_info(&pdev->dev,
1513 "PHY reset is blocked due to SOL/IDER session.\n");
1514
1515 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001516 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001517 NETIF_F_HW_VLAN_TX |
1518 NETIF_F_HW_VLAN_RX |
1519 NETIF_F_HW_VLAN_FILTER;
1520
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001521 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001522 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001523 netdev->features |= NETIF_F_TSO6;
Herbert Xu5c0999b2009-01-19 15:20:57 -08001524 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001525
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001526 netdev->vlan_features |= NETIF_F_TSO;
1527 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001528 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001529 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001530 netdev->vlan_features |= NETIF_F_SG;
1531
Auke Kok9d5c8242008-01-24 02:22:38 -08001532 if (pci_using_dac)
1533 netdev->features |= NETIF_F_HIGHDMA;
1534
Alexander Duyck5b043fb2009-10-27 23:52:31 +00001535 if (hw->mac.type >= e1000_82576)
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001536 netdev->features |= NETIF_F_SCTP_CSUM;
1537
Alexander Duyck330a6d62009-10-27 23:51:35 +00001538 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001539
1540 /* before reading the NVM, reset the controller to put the device in a
1541 * known good starting state */
1542 hw->mac.ops.reset_hw(hw);
1543
1544 /* make sure the NVM is good */
1545 if (igb_validate_nvm_checksum(hw) < 0) {
1546 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1547 err = -EIO;
1548 goto err_eeprom;
1549 }
1550
1551 /* copy the MAC address out of the NVM */
1552 if (hw->mac.ops.read_mac_addr(hw))
1553 dev_err(&pdev->dev, "NVM Read Error\n");
1554
1555 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1556 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1557
1558 if (!is_valid_ether_addr(netdev->perm_addr)) {
1559 dev_err(&pdev->dev, "Invalid MAC Address\n");
1560 err = -EIO;
1561 goto err_eeprom;
1562 }
1563
Alexander Duyck0e340482009-03-20 00:17:08 +00001564 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1565 (unsigned long) adapter);
1566 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1567 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001568
1569 INIT_WORK(&adapter->reset_task, igb_reset_task);
1570 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1571
Alexander Duyck450c87c2009-02-06 23:22:11 +00001572 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001573 adapter->fc_autoneg = true;
1574 hw->mac.autoneg = true;
1575 hw->phy.autoneg_advertised = 0x2f;
1576
Alexander Duyck0cce1192009-07-23 18:10:24 +00001577 hw->fc.requested_mode = e1000_fc_default;
1578 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001579
Auke Kok9d5c8242008-01-24 02:22:38 -08001580 igb_validate_mdi_setting(hw);
1581
Auke Kok9d5c8242008-01-24 02:22:38 -08001582 /* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
1583 * enable the ACPI Magic Packet filter.
1584 */
1585
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001586 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001587 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duyck55cac242009-11-19 12:42:21 +00001588 else if (hw->mac.type == e1000_82580)
1589 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1590 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1591 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001592 else if (hw->bus.func == 1)
1593 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001594
1595 if (eeprom_data & eeprom_apme_mask)
1596 adapter->eeprom_wol |= E1000_WUFC_MAG;
1597
1598 /* now that we have the eeprom settings, apply the special cases where
1599 * the eeprom may be wrong or the board simply won't support wake on
1600 * lan on a particular port */
1601 switch (pdev->device) {
1602 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1603 adapter->eeprom_wol = 0;
1604 break;
1605 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001606 case E1000_DEV_ID_82576_FIBER:
1607 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001608 /* Wake events only supported on port A for dual fiber
1609 * regardless of eeprom setting */
1610 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1611 adapter->eeprom_wol = 0;
1612 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001613 case E1000_DEV_ID_82576_QUAD_COPPER:
1614 /* if quad port adapter, disable WoL on all but port A */
1615 if (global_quad_port_a != 0)
1616 adapter->eeprom_wol = 0;
1617 else
1618 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1619 /* Reset for multiple quad port adapters */
1620 if (++global_quad_port_a == 4)
1621 global_quad_port_a = 0;
1622 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001623 }
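/* Sketch of the quad-port bookkeeping above, assuming the four functions
 * of one adapter probe in order: port A sees global_quad_port_a == 0,
 * keeps WoL and gets IGB_FLAG_QUAD_PORT_A; ports B-D see 1..3 and have
 * WoL cleared; the counter then wraps to 0 so the next quad-port adapter
 * starts fresh.
 */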
1624
1625 /* initialize the wol settings based on the eeprom settings */
1626 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001627 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001628
1629 /* reset the hardware with the new settings */
1630 igb_reset(adapter);
1631
1632 /* let the f/w know that the h/w is now under the control of the
1633 * driver. */
1634 igb_get_hw_control(adapter);
1635
Auke Kok9d5c8242008-01-24 02:22:38 -08001636 strcpy(netdev->name, "eth%d");
1637 err = register_netdev(netdev);
1638 if (err)
1639 goto err_register;
1640
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001641 /* carrier off reporting is important to ethtool even BEFORE open */
1642 netif_carrier_off(netdev);
1643
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001644#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001645 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001646 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001647 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001648 igb_setup_dca(adapter);
1649 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001650
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001651#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001652 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1653 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001654 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001655 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00001656 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1657 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001658 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1659 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1660 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1661 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001662 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001663
1664 igb_read_part_num(hw, &part_num);
1665 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1666 (part_num >> 8), (part_num & 0xff));
1667
1668 dev_info(&pdev->dev,
1669 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1670 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001671 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001672 adapter->num_rx_queues, adapter->num_tx_queues);
1673
Auke Kok9d5c8242008-01-24 02:22:38 -08001674 return 0;
1675
1676err_register:
1677 igb_release_hw_control(adapter);
1678err_eeprom:
1679 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001680 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001681
1682 if (hw->flash_address)
1683 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001684err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001685 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001686 iounmap(hw->hw_addr);
1687err_ioremap:
1688 free_netdev(netdev);
1689err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00001690 pci_release_selected_regions(pdev,
1691 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001692err_pci_reg:
1693err_dma:
1694 pci_disable_device(pdev);
1695 return err;
1696}
1697
1698/**
1699 * igb_remove - Device Removal Routine
1700 * @pdev: PCI device information struct
1701 *
1702 * igb_remove is called by the PCI subsystem to alert the driver
1703 * that it should release a PCI device. This could be caused by a
1704 * Hot-Plug event, or because the driver is going to be removed from
1705 * memory.
1706 **/
1707static void __devexit igb_remove(struct pci_dev *pdev)
1708{
1709 struct net_device *netdev = pci_get_drvdata(pdev);
1710 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001711 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001712
1713 /* flush_scheduled_work() may reschedule our watchdog task, so
1714 * explicitly disable the watchdog task from being rescheduled */
1715 set_bit(__IGB_DOWN, &adapter->state);
1716 del_timer_sync(&adapter->watchdog_timer);
1717 del_timer_sync(&adapter->phy_info_timer);
1718
1719 flush_scheduled_work();
1720
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001721#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001722 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001723 dev_info(&pdev->dev, "DCA disabled\n");
1724 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001725 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001726 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001727 }
1728#endif
1729
Auke Kok9d5c8242008-01-24 02:22:38 -08001730 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1731 * would have already happened in close and is redundant. */
1732 igb_release_hw_control(adapter);
1733
1734 unregister_netdev(netdev);
1735
Alexander Duyck047e0032009-10-27 15:49:27 +00001736 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001737
Alexander Duyck37680112009-02-19 20:40:30 -08001738#ifdef CONFIG_PCI_IOV
1739 /* reclaim resources allocated to VFs */
1740 if (adapter->vf_data) {
1741 /* disable iov and allow time for transactions to clear */
1742 pci_disable_sriov(pdev);
1743 msleep(500);
1744
1745 kfree(adapter->vf_data);
1746 adapter->vf_data = NULL;
1747 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1748 msleep(100);
1749 dev_info(&pdev->dev, "IOV Disabled\n");
1750 }
1751#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00001752
Alexander Duyck28b07592009-02-06 23:20:31 +00001753 iounmap(hw->hw_addr);
1754 if (hw->flash_address)
1755 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00001756 pci_release_selected_regions(pdev,
1757 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001758
1759 free_netdev(netdev);
1760
Frans Pop19d5afd2009-10-02 10:04:12 -07001761 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001762
Auke Kok9d5c8242008-01-24 02:22:38 -08001763 pci_disable_device(pdev);
1764}
1765
1766/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00001767 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1768 * @adapter: board private structure to initialize
1769 *
1770 * This function initializes the VF-specific data storage and then attempts to
1771 * allocate the VFs. The reason for this ordering is that it is much
1772 * more expensive time-wise to disable SR-IOV than it is to allocate and free
1773 * the memory for the VFs.
1774 **/
1775static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1776{
1777#ifdef CONFIG_PCI_IOV
1778 struct pci_dev *pdev = adapter->pdev;
1779
1780 if (adapter->vfs_allocated_count > 7)
1781 adapter->vfs_allocated_count = 7;
1782
1783 if (adapter->vfs_allocated_count) {
1784 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1785 sizeof(struct vf_data_storage),
1786 GFP_KERNEL);
1787 /* if allocation failed then we do not support SR-IOV */
1788 if (!adapter->vf_data) {
1789 adapter->vfs_allocated_count = 0;
1790 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1791 "Data Storage\n");
1792 }
1793 }
1794
1795 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1796 kfree(adapter->vf_data);
1797 adapter->vf_data = NULL;
1798#endif /* CONFIG_PCI_IOV */
1799 adapter->vfs_allocated_count = 0;
1800#ifdef CONFIG_PCI_IOV
1801 } else {
1802 unsigned char mac_addr[ETH_ALEN];
1803 int i;
1804 dev_info(&pdev->dev, "%d vfs allocated\n",
1805 adapter->vfs_allocated_count);
1806 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1807 random_ether_addr(mac_addr);
1808 igb_set_vf_mac(adapter, i, mac_addr);
1809 }
1810 }
1811#endif /* CONFIG_PCI_IOV */
1812}
1813
Alexander Duyck115f4592009-11-12 18:37:00 +00001814
1815/**
1816 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1817 * @adapter: board private structure to initialize
1818 *
1819 * igb_init_hw_timer initializes the function pointers and values for the
1820 * hardware timer used for IEEE 1588 timestamping.
1821 **/
1822static void igb_init_hw_timer(struct igb_adapter *adapter)
1823{
1824 struct e1000_hw *hw = &adapter->hw;
1825
1826 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00001827 case e1000_82580:
1828 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1829 adapter->cycles.read = igb_read_clock;
1830 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1831 adapter->cycles.mult = 1;
1832 /*
1833 * The 82580 timesync logic advances the system timer by 8 ns every
1834 * 8 ns, and that increment cannot be scaled. Instead we need to shift
1835 * the registers to generate a 64-bit timer value. As a result
1836 * SYSTIMR/L/H, TXSTMPL/H and RXSTMPL/H all have to be shifted by
1837 * 24 in order to generate a larger value for synchronization.
1838 */
1839 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
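/* Illustrative reading of the shift (assumes IGB_82580_TSYNC_SHIFT is 24,
 * as the comment above implies): igb_read_clock would then return roughly
 * (nanoseconds << 24) plus a 24-bit SYSTIMR fraction, and with mult = 1
 * the timecounter recovers whole nanoseconds as counts >> 24.
 */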
1840 /* disable system timer temporarily by setting bit 31 */
1841 wr32(E1000_TSAUXC, 0x80000000);
1842 wrfl();
1843
1844 /* Set registers so that rollover occurs soon to test this. */
1845 wr32(E1000_SYSTIMR, 0x00000000);
1846 wr32(E1000_SYSTIML, 0x80000000);
1847 wr32(E1000_SYSTIMH, 0x000000FF);
1848 wrfl();
1849
1850 /* enable system timer by clearing bit 31 */
1851 wr32(E1000_TSAUXC, 0x0);
1852 wrfl();
1853
1854 timecounter_init(&adapter->clock,
1855 &adapter->cycles,
1856 ktime_to_ns(ktime_get_real()));
1857 /*
1858 * Synchronize our NIC clock against system wall clock. NIC
1859 * time stamp reading requires ~3us per sample, each sample
1860 * was pretty stable even under load => only require 10
1861 * samples for each offset comparison.
1862 */
1863 memset(&adapter->compare, 0, sizeof(adapter->compare));
1864 adapter->compare.source = &adapter->clock;
1865 adapter->compare.target = ktime_get_real;
1866 adapter->compare.num_samples = 10;
1867 timecompare_update(&adapter->compare, 0);
1868 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00001869 case e1000_82576:
1870 /*
1871 * Initialize hardware timer: we keep it running just in case
1872 * that some program needs it later on.
1873 */
1874 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1875 adapter->cycles.read = igb_read_clock;
1876 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1877 adapter->cycles.mult = 1;
1878 /*
1879 * Scale the NIC clock cycle by a large factor so that
1880 * relatively small clock corrections can be added or
1881 * subtracted at each clock tick. The drawbacks of a large
1882 * factor are a) that the clock register overflows more quickly
1883 * (not such a big deal) and b) that the increment per tick has
1884 * to fit into 24 bits. As a result we need to use a shift of
1885 * 19 so we can fit a value of 16 into the TIMINCA register.
1886 */
1887 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1888 wr32(E1000_TIMINCA,
1889 (1 << E1000_TIMINCA_16NS_SHIFT) |
1890 (16 << IGB_82576_TSYNC_SHIFT));
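/* Worked numbers for the write above, using the shift of 19 and the
 * increment of 16 described in the comment: every 16 ns period SYSTIM
 * grows by 16 << 19 = 0x800000 counts, and the timecounter converts back
 * with ns = counts >> 19, so the clock still advances 16 ns per period
 * while a one-count increment adjustment changes the rate by only
 * 2^-19 ns per period.
 */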
1891
1892 /* Set registers so that rollover occurs soon to test this. */
1893 wr32(E1000_SYSTIML, 0x00000000);
1894 wr32(E1000_SYSTIMH, 0xFF800000);
1895 wrfl();
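/* Back-of-envelope check (values taken from the writes above): starting
 * at SYSTIMH:SYSTIML = 0xFF800000:00000000 leaves 2^55 counts before the
 * 64-bit counter wraps; at shift 19 that is 2^36 ns, so the rollover
 * being tested fires after roughly 69 seconds.
 */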
1896
1897 timecounter_init(&adapter->clock,
1898 &adapter->cycles,
1899 ktime_to_ns(ktime_get_real()));
1900 /*
1901 * Synchronize our NIC clock against system wall clock. NIC
1902 * time stamp reading requires ~3us per sample, each sample
1903 * was pretty stable even under load => only require 10
1904 * samples for each offset comparison.
1905 */
1906 memset(&adapter->compare, 0, sizeof(adapter->compare));
1907 adapter->compare.source = &adapter->clock;
1908 adapter->compare.target = ktime_get_real;
1909 adapter->compare.num_samples = 10;
1910 timecompare_update(&adapter->compare, 0);
1911 break;
1912 case e1000_82575:
1913 /* 82575 does not support timesync */
1914 default:
1915 break;
1916 }
1917
1918}
1919
Alexander Duycka6b623e2009-10-27 23:47:53 +00001920/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001921 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1922 * @adapter: board private structure to initialize
1923 *
1924 * igb_sw_init initializes the Adapter private data structure.
1925 * Fields are initialized based on PCI device information and
1926 * OS network device settings (MTU size).
1927 **/
1928static int __devinit igb_sw_init(struct igb_adapter *adapter)
1929{
1930 struct e1000_hw *hw = &adapter->hw;
1931 struct net_device *netdev = adapter->netdev;
1932 struct pci_dev *pdev = adapter->pdev;
1933
1934 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1935
Alexander Duyck68fd9912008-11-20 00:48:10 -08001936 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1937 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001938 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1939 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1940
Auke Kok9d5c8242008-01-24 02:22:38 -08001941 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1942 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1943
Alexander Duycka6b623e2009-10-27 23:47:53 +00001944#ifdef CONFIG_PCI_IOV
1945 if (hw->mac.type == e1000_82576)
1946 adapter->vfs_allocated_count = max_vfs;
1947
1948#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00001949 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1950
1951 /*
1952 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
1953 * then we should combine the queues into a queue pair in order to
1954 * conserve interrupts due to limited supply
1955 */
1956 if ((adapter->rss_queues > 4) ||
1957 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1958 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
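/* For illustration (vector budget assumed): with rss_queues = 8,
 * unpaired operation would want one q_vector per ring, i.e. 8 Tx + 8 Rx
 * = 16 MSI-X vectors plus one for link/other causes; pairing each Tx
 * ring with an Rx ring on a shared q_vector cuts that to 8 + 1, which
 * fits the hardware's limited vector supply.
 */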
1959
Alexander Duycka6b623e2009-10-27 23:47:53 +00001960 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00001961 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001962 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1963 return -ENOMEM;
1964 }
1965
Alexander Duyck115f4592009-11-12 18:37:00 +00001966 igb_init_hw_timer(adapter);
Alexander Duycka6b623e2009-10-27 23:47:53 +00001967 igb_probe_vfs(adapter);
1968
Auke Kok9d5c8242008-01-24 02:22:38 -08001969 /* Explicitly disable IRQ since the NIC can be in any state. */
1970 igb_irq_disable(adapter);
1971
1972 set_bit(__IGB_DOWN, &adapter->state);
1973 return 0;
1974}
1975
1976/**
1977 * igb_open - Called when a network interface is made active
1978 * @netdev: network interface device structure
1979 *
1980 * Returns 0 on success, negative value on failure
1981 *
1982 * The open entry point is called when a network interface is made
1983 * active by the system (IFF_UP). At this point all resources needed
1984 * for transmit and receive operations are allocated, the interrupt
1985 * handler is registered with the OS, the watchdog timer is started,
1986 * and the stack is notified that the interface is ready.
1987 **/
1988static int igb_open(struct net_device *netdev)
1989{
1990 struct igb_adapter *adapter = netdev_priv(netdev);
1991 struct e1000_hw *hw = &adapter->hw;
1992 int err;
1993 int i;
1994
1995 /* disallow open during test */
1996 if (test_bit(__IGB_TESTING, &adapter->state))
1997 return -EBUSY;
1998
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001999 netif_carrier_off(netdev);
2000
Auke Kok9d5c8242008-01-24 02:22:38 -08002001 /* allocate transmit descriptors */
2002 err = igb_setup_all_tx_resources(adapter);
2003 if (err)
2004 goto err_setup_tx;
2005
2006 /* allocate receive descriptors */
2007 err = igb_setup_all_rx_resources(adapter);
2008 if (err)
2009 goto err_setup_rx;
2010
Nick Nunley88a268c2010-02-17 01:01:59 +00002011 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002012
Auke Kok9d5c8242008-01-24 02:22:38 -08002013 /* before we allocate an interrupt, we must be ready to handle it.
2014 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2015 * as soon as we call pci_request_irq, so we have to set up our
2016 * clean_rx handler before we do so. */
2017 igb_configure(adapter);
2018
2019 err = igb_request_irq(adapter);
2020 if (err)
2021 goto err_req_irq;
2022
2023 /* From here on the code is the same as igb_up() */
2024 clear_bit(__IGB_DOWN, &adapter->state);
2025
Alexander Duyck047e0032009-10-27 15:49:27 +00002026 for (i = 0; i < adapter->num_q_vectors; i++) {
2027 struct igb_q_vector *q_vector = adapter->q_vector[i];
2028 napi_enable(&q_vector->napi);
2029 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002030
2031 /* Clear any pending interrupts. */
2032 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002033
2034 igb_irq_enable(adapter);
2035
Alexander Duyckd4960302009-10-27 15:53:45 +00002036 /* notify VFs that reset has been completed */
2037 if (adapter->vfs_allocated_count) {
2038 u32 reg_data = rd32(E1000_CTRL_EXT);
2039 reg_data |= E1000_CTRL_EXT_PFRSTD;
2040 wr32(E1000_CTRL_EXT, reg_data);
2041 }
2042
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002043 netif_tx_start_all_queues(netdev);
2044
Alexander Duyck25568a52009-10-27 23:49:59 +00002045 /* start the watchdog. */
2046 hw->mac.get_link_status = 1;
2047 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002048
2049 return 0;
2050
2051err_req_irq:
2052 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002053 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002054 igb_free_all_rx_resources(adapter);
2055err_setup_rx:
2056 igb_free_all_tx_resources(adapter);
2057err_setup_tx:
2058 igb_reset(adapter);
2059
2060 return err;
2061}
2062
2063/**
2064 * igb_close - Disables a network interface
2065 * @netdev: network interface device structure
2066 *
2067 * Returns 0, this is not allowed to fail
2068 *
2069 * The close entry point is called when an interface is de-activated
2070 * by the OS. The hardware is still under the driver's control, but
2071 * needs to be disabled. A global MAC reset is issued to stop the
2072 * hardware, and all transmit and receive resources are freed.
2073 **/
2074static int igb_close(struct net_device *netdev)
2075{
2076 struct igb_adapter *adapter = netdev_priv(netdev);
2077
2078 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2079 igb_down(adapter);
2080
2081 igb_free_irq(adapter);
2082
2083 igb_free_all_tx_resources(adapter);
2084 igb_free_all_rx_resources(adapter);
2085
Auke Kok9d5c8242008-01-24 02:22:38 -08002086 return 0;
2087}
2088
2089/**
2090 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002091 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2092 *
2093 * Return 0 on success, negative on failure
2094 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002095int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002096{
Alexander Duyck80785292009-10-27 15:51:47 +00002097 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002098 int size;
2099
2100 size = sizeof(struct igb_buffer) * tx_ring->count;
2101 tx_ring->buffer_info = vmalloc(size);
2102 if (!tx_ring->buffer_info)
2103 goto err;
2104 memset(tx_ring->buffer_info, 0, size);
2105
2106 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002107 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002108 tx_ring->size = ALIGN(tx_ring->size, 4096);
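/* e.g. with the default of 256 descriptors (an assumed, typical value)
 * at 16 bytes each this is exactly 4096 bytes; a count of 80 would give
 * 1280 bytes, rounded up here to a full 4 KB page.
 */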
2109
Alexander Duyck439705e2009-10-27 23:49:20 +00002110 tx_ring->desc = pci_alloc_consistent(pdev,
2111 tx_ring->size,
Auke Kok9d5c8242008-01-24 02:22:38 -08002112 &tx_ring->dma);
2113
2114 if (!tx_ring->desc)
2115 goto err;
2116
Auke Kok9d5c8242008-01-24 02:22:38 -08002117 tx_ring->next_to_use = 0;
2118 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002119 return 0;
2120
2121err:
2122 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00002123 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002124 "Unable to allocate memory for the transmit descriptor ring\n");
2125 return -ENOMEM;
2126}
2127
2128/**
2129 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2130 * (Descriptors) for all queues
2131 * @adapter: board private structure
2132 *
2133 * Return 0 on success, negative on failure
2134 **/
2135static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2136{
Alexander Duyck439705e2009-10-27 23:49:20 +00002137 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002138 int i, err = 0;
2139
2140 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002141 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002142 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002143 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002144 "Allocation for Tx Queue %u failed\n", i);
2145 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002146 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002147 break;
2148 }
2149 }
2150
Alexander Duycka99955f2009-11-12 18:37:19 +00002151 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002152 int r_idx = i % adapter->num_tx_queues;
Alexander Duyck3025a442010-02-17 01:02:39 +00002153 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002154 }
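/* For illustration: with num_tx_queues = 4, the loop above maps table
 * entry i to ring i % 4, so any queue index the stack picks resolves to
 * a real Tx ring.
 */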
Auke Kok9d5c8242008-01-24 02:22:38 -08002155 return err;
2156}
2157
2158/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002159 * igb_setup_tctl - configure the transmit control registers
2160 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002161 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002162void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002163{
Auke Kok9d5c8242008-01-24 02:22:38 -08002164 struct e1000_hw *hw = &adapter->hw;
2165 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002166
Alexander Duyck85b430b2009-10-27 15:50:29 +00002167 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2168 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002169
2170 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002171 tctl = rd32(E1000_TCTL);
2172 tctl &= ~E1000_TCTL_CT;
2173 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2174 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2175
2176 igb_config_collision_dist(hw);
2177
Auke Kok9d5c8242008-01-24 02:22:38 -08002178 /* Enable transmits */
2179 tctl |= E1000_TCTL_EN;
2180
2181 wr32(E1000_TCTL, tctl);
2182}
2183
2184/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002185 * igb_configure_tx_ring - Configure transmit ring after Reset
2186 * @adapter: board private structure
2187 * @ring: tx ring to configure
2188 *
2189 * Configure a transmit ring after a reset.
2190 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002191void igb_configure_tx_ring(struct igb_adapter *adapter,
2192 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002193{
2194 struct e1000_hw *hw = &adapter->hw;
2195 u32 txdctl;
2196 u64 tdba = ring->dma;
2197 int reg_idx = ring->reg_idx;
2198
2199 /* disable the queue */
2200 txdctl = rd32(E1000_TXDCTL(reg_idx));
2201 wr32(E1000_TXDCTL(reg_idx),
2202 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2203 wrfl();
2204 mdelay(10);
2205
2206 wr32(E1000_TDLEN(reg_idx),
2207 ring->count * sizeof(union e1000_adv_tx_desc));
2208 wr32(E1000_TDBAL(reg_idx),
2209 tdba & 0x00000000ffffffffULL);
2210 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2211
Alexander Duyckfce99e32009-10-27 15:51:27 +00002212 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2213 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2214 writel(0, ring->head);
2215 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002216
2217 txdctl |= IGB_TX_PTHRESH;
2218 txdctl |= IGB_TX_HTHRESH << 8;
2219 txdctl |= IGB_TX_WTHRESH << 16;
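/* The three ORs above pack the prefetch/host/write-back thresholds into
 * TXDCTL bits 5:0, 13:8 and 21:16; assuming illustrative values of
 * 8, 1 and 1 (the real ones come from igb.h), the result is 0x00010108.
 */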
2220
2221 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2222 wr32(E1000_TXDCTL(reg_idx), txdctl);
2223}
2224
2225/**
2226 * igb_configure_tx - Configure transmit Unit after Reset
2227 * @adapter: board private structure
2228 *
2229 * Configure the Tx unit of the MAC after a reset.
2230 **/
2231static void igb_configure_tx(struct igb_adapter *adapter)
2232{
2233 int i;
2234
2235 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002236 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002237}
2238
2239/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002240 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002241 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2242 *
2243 * Returns 0 on success, negative on failure
2244 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002245int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002246{
Alexander Duyck80785292009-10-27 15:51:47 +00002247 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002248 int size, desc_len;
2249
2250 size = sizeof(struct igb_buffer) * rx_ring->count;
2251 rx_ring->buffer_info = vmalloc(size);
2252 if (!rx_ring->buffer_info)
2253 goto err;
2254 memset(rx_ring->buffer_info, 0, size);
2255
2256 desc_len = sizeof(union e1000_adv_rx_desc);
2257
2258 /* Round up to nearest 4K */
2259 rx_ring->size = rx_ring->count * desc_len;
2260 rx_ring->size = ALIGN(rx_ring->size, 4096);
2261
2262 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2263 &rx_ring->dma);
2264
2265 if (!rx_ring->desc)
2266 goto err;
2267
2268 rx_ring->next_to_clean = 0;
2269 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002270
Auke Kok9d5c8242008-01-24 02:22:38 -08002271 return 0;
2272
2273err:
2274 vfree(rx_ring->buffer_info);
Alexander Duyck439705e2009-10-27 23:49:20 +00002275 rx_ring->buffer_info = NULL;
Alexander Duyck80785292009-10-27 15:51:47 +00002276 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002277 "the receive descriptor ring\n");
2278 return -ENOMEM;
2279}
2280
2281/**
2282 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2283 * (Descriptors) for all queues
2284 * @adapter: board private structure
2285 *
2286 * Return 0 on success, negative on failure
2287 **/
2288static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2289{
Alexander Duyck439705e2009-10-27 23:49:20 +00002290 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002291 int i, err = 0;
2292
2293 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002294 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002295 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002296 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002297 "Allocation for Rx Queue %u failed\n", i);
2298 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002299 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002300 break;
2301 }
2302 }
2303
2304 return err;
2305}
2306
2307/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002308 * igb_setup_mrqc - configure the multiple receive queue control registers
2309 * @adapter: Board private structure
2310 **/
2311static void igb_setup_mrqc(struct igb_adapter *adapter)
2312{
2313 struct e1000_hw *hw = &adapter->hw;
2314 u32 mrqc, rxcsum;
2315 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2316 union e1000_reta {
2317 u32 dword;
2318 u8 bytes[4];
2319 } reta;
2320 static const u8 rsshash[40] = {
2321 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2322 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2323 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2324 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2325
2326 /* Fill out hash function seeds */
2327 for (j = 0; j < 10; j++) {
2328 u32 rsskey = rsshash[(j * 4)];
2329 rsskey |= rsshash[(j * 4) + 1] << 8;
2330 rsskey |= rsshash[(j * 4) + 2] << 16;
2331 rsskey |= rsshash[(j * 4) + 3] << 24;
2332 array_wr32(E1000_RSSRK(0), j, rsskey);
2333 }
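/* First iteration, worked out: rsshash[0..3] = 6d 5a 56 da packs
 * little-endian into rsskey = 0xda565a6d, which lands in RSSRK(0).
 */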
2334
Alexander Duycka99955f2009-11-12 18:37:19 +00002335 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002336
2337 if (adapter->vfs_allocated_count) {
2338 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2339 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00002340 case e1000_82580:
2341 num_rx_queues = 1;
2342 shift = 0;
2343 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002344 case e1000_82576:
2345 shift = 3;
2346 num_rx_queues = 2;
2347 break;
2348 case e1000_82575:
2349 shift = 2;
2350 shift2 = 6;
2351 default:
2352 break;
2353 }
2354 } else {
2355 if (hw->mac.type == e1000_82575)
2356 shift = 6;
2357 }
2358
2359 for (j = 0; j < (32 * 4); j++) {
2360 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2361 if (shift2)
2362 reta.bytes[j & 3] |= num_rx_queues << shift2;
2363 if ((j & 3) == 3)
2364 wr32(E1000_RETA(j >> 2), reta.dword);
2365 }
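/* Example fill (assumed non-VF 82576: num_rx_queues = 4, shift = 0):
 * the 128 RETA entries repeat 0,1,2,3 so RSS hashes spread evenly over
 * four rings; with VFs on 82576 (shift = 3, two queues) the entries
 * alternate 0 and 8, placing the queue index in the bits the VMDq pool
 * logic expects.
 */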
2366
2367 /*
2368 * Disable raw packet checksumming so that RSS hash is placed in
2369 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2370 * offloads as they are enabled by default
2371 */
2372 rxcsum = rd32(E1000_RXCSUM);
2373 rxcsum |= E1000_RXCSUM_PCSD;
2374
2375 if (adapter->hw.mac.type >= e1000_82576)
2376 /* Enable Receive Checksum Offload for SCTP */
2377 rxcsum |= E1000_RXCSUM_CRCOFL;
2378
2379 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2380 wr32(E1000_RXCSUM, rxcsum);
2381
2382 /* If VMDq is enabled then we set the appropriate mode for that, else
2383 * we default to RSS so that an RSS hash is calculated per packet even
2384 * if we are only using one queue */
2385 if (adapter->vfs_allocated_count) {
2386 if (hw->mac.type > e1000_82575) {
2387 /* Set the default pool for the PF's first queue */
2388 u32 vtctl = rd32(E1000_VT_CTL);
2389 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2390 E1000_VT_CTL_DISABLE_DEF_POOL);
2391 vtctl |= adapter->vfs_allocated_count <<
2392 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2393 wr32(E1000_VT_CTL, vtctl);
2394 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002395 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002396 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2397 else
2398 mrqc = E1000_MRQC_ENABLE_VMDQ;
2399 } else {
2400 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2401 }
2402 igb_vmm_control(adapter);
2403
2404 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2405 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2406 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2407 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2408 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2409 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2410 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2411 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2412
2413 wr32(E1000_MRQC, mrqc);
2414}
2415
2416/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002417 * igb_setup_rctl - configure the receive control registers
2418 * @adapter: Board private structure
2419 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002420void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002421{
2422 struct e1000_hw *hw = &adapter->hw;
2423 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002424
2425 rctl = rd32(E1000_RCTL);
2426
2427 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002428 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002429
Alexander Duyck69d728b2008-11-25 01:04:03 -08002430 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002431 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002432
Auke Kok87cb7e82008-07-08 15:08:29 -07002433 /*
2434 * enable stripping of CRC. It's unlikely this will break BMC
2435 * redirection as it did with e1000. Newer features require
2436 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002437 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002438 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002439
Alexander Duyck559e9c42009-10-27 23:52:50 +00002440 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002441 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002442
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002443 /* enable LPE (long packet enable); RLPML caps frames at max_frame_size */
2444 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002445
Alexander Duyck952f72a2009-10-27 15:51:07 +00002446 /* disable queue 0 to prevent tail write w/o re-config */
2447 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002448
Alexander Duycke1739522009-02-19 20:39:44 -08002449 /* Attention!!! For SR-IOV PF driver operations you must enable
2450 * queue drop for all VF and PF queues to prevent head of line blocking
2451 * if an un-trusted VF does not provide descriptors to hardware.
2452 */
2453 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002454 /* set all queue drop enable bits */
2455 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002456 }
2457
Auke Kok9d5c8242008-01-24 02:22:38 -08002458 wr32(E1000_RCTL, rctl);
2459}
2460
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002461static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2462 int vfn)
2463{
2464 struct e1000_hw *hw = &adapter->hw;
2465 u32 vmolr;
2466
2467 /* if it isn't the PF, check to see if VFs are enabled and
2468 * increase the size to support vlan tags */
2469 if (vfn < adapter->vfs_allocated_count &&
2470 adapter->vf_data[vfn].vlans_enabled)
2471 size += VLAN_TAG_SIZE;
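/* e.g. a VF with VLANs enabled and a standard 1518-byte max frame
 * (assumed) gets 1518 + 4 = 1522 programmed into VMOLR.RLPML below.
 */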
2472
2473 vmolr = rd32(E1000_VMOLR(vfn));
2474 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2475 vmolr |= size | E1000_VMOLR_LPE;
2476 wr32(E1000_VMOLR(vfn), vmolr);
2477
2478 return 0;
2479}
2480
Auke Kok9d5c8242008-01-24 02:22:38 -08002481/**
Alexander Duycke1739522009-02-19 20:39:44 -08002482 * igb_rlpml_set - set maximum receive packet size
2483 * @adapter: board private structure
2484 *
2485 * Configure maximum receivable packet size.
2486 **/
2487static void igb_rlpml_set(struct igb_adapter *adapter)
2488{
2489 u32 max_frame_size = adapter->max_frame_size;
2490 struct e1000_hw *hw = &adapter->hw;
2491 u16 pf_id = adapter->vfs_allocated_count;
2492
2493 if (adapter->vlgrp)
2494 max_frame_size += VLAN_TAG_SIZE;
2495
2496 /* if vfs are enabled we set RLPML to the largest possible request
2497 * size and set the VMOLR RLPML to the size we need */
2498 if (pf_id) {
2499 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002500 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002501 }
2502
2503 wr32(E1000_RLPML, max_frame_size);
2504}
2505
Williams, Mitch A8151d292010-02-10 01:44:24 +00002506static inline void igb_set_vmolr(struct igb_adapter *adapter,
2507 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002508{
2509 struct e1000_hw *hw = &adapter->hw;
2510 u32 vmolr;
2511
2512 /*
2513 * This register exists only on 82576 and newer, so on older hardware
2514 * we should exit and do nothing
2515 */
2516 if (hw->mac.type < e1000_82576)
2517 return;
2518
2519 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00002520 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2521 if (aupe)
2522 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2523 else
2524 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002525
2526 /* clear all bits that might not be set */
2527 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2528
Alexander Duycka99955f2009-11-12 18:37:19 +00002529 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002530 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2531 /*
2532 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2533 * multicast packets
2534 */
2535 if (vfn <= adapter->vfs_allocated_count)
2536 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2537
2538 wr32(E1000_VMOLR(vfn), vmolr);
2539}
2540
Alexander Duycke1739522009-02-19 20:39:44 -08002541/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002542 * igb_configure_rx_ring - Configure a receive ring after Reset
2543 * @adapter: board private structure
2544 * @ring: receive ring to be configured
2545 *
2546 * Configure the Rx unit of the MAC after a reset.
2547 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002548void igb_configure_rx_ring(struct igb_adapter *adapter,
2549 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002550{
2551 struct e1000_hw *hw = &adapter->hw;
2552 u64 rdba = ring->dma;
2553 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002554 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002555
2556 /* disable the queue */
2557 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2558 wr32(E1000_RXDCTL(reg_idx),
2559 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2560
2561 /* Set DMA base address registers */
2562 wr32(E1000_RDBAL(reg_idx),
2563 rdba & 0x00000000ffffffffULL);
2564 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2565 wr32(E1000_RDLEN(reg_idx),
2566 ring->count * sizeof(union e1000_adv_rx_desc));
2567
2568 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002569 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2570 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2571 writel(0, ring->head);
2572 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002573
Alexander Duyck952f72a2009-10-27 15:51:07 +00002574 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002575 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2576 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002577 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2578#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2579 srrctl |= IGB_RXBUFFER_16384 >>
2580 E1000_SRRCTL_BSIZEPKT_SHIFT;
2581#else
2582 srrctl |= (PAGE_SIZE / 2) >>
2583 E1000_SRRCTL_BSIZEPKT_SHIFT;
2584#endif
2585 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2586 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002587 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002588 E1000_SRRCTL_BSIZEPKT_SHIFT;
2589 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2590 }
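/* For the common 2048-byte buffer case (assumed default), the else
 * branch above programs ALIGN(2048, 1024) >> 10 = 2 into the BSIZEPKT
 * field (1 KB units) with the one-buffer descriptor type.
 */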
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00002591 /* Only set Drop Enable if we are supporting multiple queues */
2592 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2593 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002594
2595 wr32(E1000_SRRCTL(reg_idx), srrctl);
2596
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002597 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00002598 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002599
Alexander Duyck85b430b2009-10-27 15:50:29 +00002600 /* enable receive descriptor fetching */
2601 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2602 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2603 rxdctl &= 0xFFF00000;
2604 rxdctl |= IGB_RX_PTHRESH;
2605 rxdctl |= IGB_RX_HTHRESH << 8;
2606 rxdctl |= IGB_RX_WTHRESH << 16;
2607 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2608}
2609
2610/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002611 * igb_configure_rx - Configure receive Unit after Reset
2612 * @adapter: board private structure
2613 *
2614 * Configure the Rx unit of the MAC after a reset.
2615 **/
2616static void igb_configure_rx(struct igb_adapter *adapter)
2617{
Hannes Eder91075842009-02-18 19:36:04 -08002618 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002619
Alexander Duyck68d480c2009-10-05 06:33:08 +00002620 /* set UTA to appropriate mode */
2621 igb_set_uta(adapter);
2622
Alexander Duyck26ad9172009-10-05 06:32:49 +00002623 /* set the correct pool for the PF default MAC address in entry 0 */
2624 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2625 adapter->vfs_allocated_count);
2626
Alexander Duyck06cf2662009-10-27 15:53:25 +00002627 /* Set up the HW Rx Head and Tail Descriptor Pointers and
2628 * the Base and Length of the Rx Descriptor Ring */
2629 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002630 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002631}
2632
2633/**
2634 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002635 * @tx_ring: Tx descriptor ring for a specific queue
2636 *
2637 * Free all transmit software resources
2638 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002639void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002640{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002641 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002642
2643 vfree(tx_ring->buffer_info);
2644 tx_ring->buffer_info = NULL;
2645
Alexander Duyck439705e2009-10-27 23:49:20 +00002646 /* if not set, then don't free */
2647 if (!tx_ring->desc)
2648 return;
2649
Alexander Duyck80785292009-10-27 15:51:47 +00002650 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2651 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002652
2653 tx_ring->desc = NULL;
2654}
2655
2656/**
2657 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2658 * @adapter: board private structure
2659 *
2660 * Free all transmit software resources
2661 **/
2662static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2663{
2664 int i;
2665
2666 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002667 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002668}
2669
Alexander Duyckb1a436c2009-10-27 15:54:43 +00002670void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2671 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002672{
Alexander Duyck6366ad32009-12-02 16:47:18 +00002673 if (buffer_info->dma) {
2674 if (buffer_info->mapped_as_page)
2675 pci_unmap_page(tx_ring->pdev,
2676 buffer_info->dma,
2677 buffer_info->length,
2678 PCI_DMA_TODEVICE);
2679 else
2680 pci_unmap_single(tx_ring->pdev,
2681 buffer_info->dma,
2682 buffer_info->length,
2683 PCI_DMA_TODEVICE);
2684 buffer_info->dma = 0;
2685 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002686 if (buffer_info->skb) {
2687 dev_kfree_skb_any(buffer_info->skb);
2688 buffer_info->skb = NULL;
2689 }
2690 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00002691 buffer_info->length = 0;
2692 buffer_info->next_to_watch = 0;
2693 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002694}
2695
2696/**
2697 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002698 * @tx_ring: ring to be cleaned
2699 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002700static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002701{
2702 struct igb_buffer *buffer_info;
2703 unsigned long size;
2704 unsigned int i;
2705
2706 if (!tx_ring->buffer_info)
2707 return;
2708 /* Free all the Tx ring sk_buffs */
2709
2710 for (i = 0; i < tx_ring->count; i++) {
2711 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002712 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002713 }
2714
2715 size = sizeof(struct igb_buffer) * tx_ring->count;
2716 memset(tx_ring->buffer_info, 0, size);
2717
2718 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08002719 memset(tx_ring->desc, 0, tx_ring->size);
2720
2721 tx_ring->next_to_use = 0;
2722 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002723}
2724
2725/**
2726 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2727 * @adapter: board private structure
2728 **/
2729static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2730{
2731 int i;
2732
2733 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002734 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002735}
2736
2737/**
2738 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002739 * @rx_ring: ring to clean the resources from
2740 *
2741 * Free all receive software resources
2742 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002743void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002744{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002745 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002746
2747 vfree(rx_ring->buffer_info);
2748 rx_ring->buffer_info = NULL;
2749
Alexander Duyck439705e2009-10-27 23:49:20 +00002750 /* if not set, then don't free */
2751 if (!rx_ring->desc)
2752 return;
2753
Alexander Duyck80785292009-10-27 15:51:47 +00002754 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2755 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002756
2757 rx_ring->desc = NULL;
2758}
2759
2760/**
2761 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2762 * @adapter: board private structure
2763 *
2764 * Free all receive software resources
2765 **/
2766static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2767{
2768 int i;
2769
2770 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002771 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002772}
2773
2774/**
2775 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002776 * @rx_ring: ring to free buffers from
2777 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002778static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002779{
2780 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002781 unsigned long size;
2782 unsigned int i;
2783
2784 if (!rx_ring->buffer_info)
2785 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00002786
Auke Kok9d5c8242008-01-24 02:22:38 -08002787 /* Free all the Rx ring sk_buffs */
2788 for (i = 0; i < rx_ring->count; i++) {
2789 buffer_info = &rx_ring->buffer_info[i];
2790 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002791 pci_unmap_single(rx_ring->pdev,
2792 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002793 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002794 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002795 buffer_info->dma = 0;
2796 }
2797
2798 if (buffer_info->skb) {
2799 dev_kfree_skb(buffer_info->skb);
2800 buffer_info->skb = NULL;
2801 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002802 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002803 pci_unmap_page(rx_ring->pdev,
2804 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002805 PAGE_SIZE / 2,
2806 PCI_DMA_FROMDEVICE);
2807 buffer_info->page_dma = 0;
2808 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002809 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002810 put_page(buffer_info->page);
2811 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002812 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002813 }
2814 }
2815
Auke Kok9d5c8242008-01-24 02:22:38 -08002816 size = sizeof(struct igb_buffer) * rx_ring->count;
2817 memset(rx_ring->buffer_info, 0, size);
2818
2819 /* Zero out the descriptor ring */
2820 memset(rx_ring->desc, 0, rx_ring->size);
2821
2822 rx_ring->next_to_clean = 0;
2823 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002824}
2825
2826/**
2827 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2828 * @adapter: board private structure
2829 **/
2830static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2831{
2832 int i;
2833
2834 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002835 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002836}
2837
2838/**
2839 * igb_set_mac - Change the Ethernet Address of the NIC
2840 * @netdev: network interface device structure
2841 * @p: pointer to an address structure
2842 *
2843 * Returns 0 on success, negative on failure
2844 **/
2845static int igb_set_mac(struct net_device *netdev, void *p)
2846{
2847 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002848 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002849 struct sockaddr *addr = p;
2850
2851 if (!is_valid_ether_addr(addr->sa_data))
2852 return -EADDRNOTAVAIL;
2853
2854 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002855 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002856
Alexander Duyck26ad9172009-10-05 06:32:49 +00002857 /* set the correct pool for the new PF MAC address in entry 0 */
2858 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2859 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002860
Auke Kok9d5c8242008-01-24 02:22:38 -08002861 return 0;
2862}
2863
2864/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002865 * igb_write_mc_addr_list - write multicast addresses to MTA
2866 * @netdev: network interface device structure
2867 *
2868 * Writes multicast address list to the MTA hash table.
2869 * Returns: -ENOMEM on failure
2870 * 0 on no addresses written
2871 * X on writing X addresses to MTA
2872 **/
2873static int igb_write_mc_addr_list(struct net_device *netdev)
2874{
2875 struct igb_adapter *adapter = netdev_priv(netdev);
2876 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko48e2f182010-02-22 09:22:26 +00002877 struct dev_mc_list *mc_ptr;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002878 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002879 int i;
2880
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002881 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002882 /* nothing to program, so clear mc list */
2883 igb_update_mc_addr_list(hw, NULL, 0);
2884 igb_restore_vf_multicasts(adapter);
2885 return 0;
2886 }
2887
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002888 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002889 if (!mta_list)
2890 return -ENOMEM;
2891
Alexander Duyck68d480c2009-10-05 06:33:08 +00002892 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00002893 i = 0;
2894 netdev_for_each_mc_addr(mc_ptr, netdev)
2895 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002896
Alexander Duyck68d480c2009-10-05 06:33:08 +00002897 igb_update_mc_addr_list(hw, mta_list, i);
2898 kfree(mta_list);
2899
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002900 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002901}
2902
2903/**
2904 * igb_write_uc_addr_list - write unicast addresses to RAR table
2905 * @netdev: network interface device structure
2906 *
2907 * Writes unicast address list to the RAR table.
2908 * Returns: -ENOMEM on failure/insufficient address space
2909 * 0 on no addresses written
2910 * X on writing X addresses to the RAR table
2911 **/
2912static int igb_write_uc_addr_list(struct net_device *netdev)
2913{
2914 struct igb_adapter *adapter = netdev_priv(netdev);
2915 struct e1000_hw *hw = &adapter->hw;
2916 unsigned int vfn = adapter->vfs_allocated_count;
2917 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2918 int count = 0;
2919
2920 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002921 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00002922 return -ENOMEM;
2923
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002924 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002925 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002926
2927 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002928 if (!rar_entries)
2929 break;
2930 igb_rar_set_qsel(adapter, ha->addr,
2931 rar_entries--,
2932 vfn);
2933 count++;
2934 }
2935 }
2936 /* write the addresses in reverse order to avoid write combining */
2937 for (; rar_entries > 0 ; rar_entries--) {
2938 wr32(E1000_RAH(rar_entries), 0);
2939 wr32(E1000_RAL(rar_entries), 0);
2940 }
2941 wrfl();
2942
2943 return count;
2944}
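/*
 * RAR budgeting sketch for the function above, assuming for illustration
 * a 24-entry RAR table and 8 VFs (vfs_allocated_count == 8): entry 0
 * holds the PF MAC, the top 8 entries hold the per-VF MACs, and
 * rar_entries == 24 - (8 + 1) == 15 slots remain for the secondary
 * unicast list, filled from index 15 downward. Anything beyond that
 * makes igb_write_uc_addr_list() return -ENOMEM, which flips
 * igb_set_rx_mode() into unicast promiscuous mode.
 */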
2945
2946/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002947 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08002948 * @netdev: network interface device structure
2949 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002950 * The set_rx_mode entry point is called whenever the unicast or multicast
2951 * address lists or the network interface flags are updated. This routine is
2952 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08002953 * promiscuous mode, and all-multi behavior.
2954 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002955static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002956{
2957 struct igb_adapter *adapter = netdev_priv(netdev);
2958 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002959 unsigned int vfn = adapter->vfs_allocated_count;
2960 u32 rctl, vmolr = 0;
2961 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08002962
2963 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08002964 rctl = rd32(E1000_RCTL);
2965
Alexander Duyck68d480c2009-10-05 06:33:08 +00002966 /* clear the affected bits */
2967 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2968
Patrick McHardy746b9f02008-07-16 20:15:45 -07002969 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002970 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002971 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002972 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002973 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002974 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002975 vmolr |= E1000_VMOLR_MPME;
2976 } else {
2977 /*
2978 * Write addresses to the MTA; if the attempt fails,
2979 * then we should just turn on promiscuous mode so
2980 * that we can at least receive multicast traffic
2981 */
2982 count = igb_write_mc_addr_list(netdev);
2983 if (count < 0) {
2984 rctl |= E1000_RCTL_MPE;
2985 vmolr |= E1000_VMOLR_MPME;
2986 } else if (count) {
2987 vmolr |= E1000_VMOLR_ROMPE;
2988 }
2989 }
2990 /*
2991 * Write addresses to available RAR registers; if there is not
2992 * sufficient space to store all the addresses, then enable
2993 * unicast promiscuous mode
2994 */
2995 count = igb_write_uc_addr_list(netdev);
2996 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002997 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002998 vmolr |= E1000_VMOLR_ROPE;
2999 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003000 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003001 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003002 wr32(E1000_RCTL, rctl);
3003
Alexander Duyck68d480c2009-10-05 06:33:08 +00003004 /*
3005 * In order to support SR-IOV and eventually VMDq it is necessary to set
3006 * the VMOLR to enable the appropriate modes. Without this workaround
3007 * we will have issues with VLAN tag stripping not being done for frames
3008 * that are only arriving because we are the default pool
3009 */
3010 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003011 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003012
Alexander Duyck68d480c2009-10-05 06:33:08 +00003013 vmolr |= rd32(E1000_VMOLR(vfn)) &
3014 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3015 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003016 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003017}
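/*
 * Condensed restatement of the RCTL/VMOLR decisions made above:
 *
 *   IFF_PROMISC        -> RCTL UPE | MPE    VMOLR ROPE | MPME
 *   IFF_ALLMULTI       -> RCTL MPE          VMOLR MPME
 *   MTA write fails    -> RCTL MPE          VMOLR MPME
 *   MTA write succeeds ->                   VMOLR ROMPE
 *   RAR table overflow -> RCTL UPE          VMOLR ROPE
 *
 * VFE (VLAN filtering) is set whenever we are not in full promiscuous
 * mode, and VMOLR is only programmed on 82576 and later, where the
 * per-pool offload register exists.
 */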
3018
3019/* Need to wait a few seconds after link up to get diagnostic information from
3020 * the phy */
3021static void igb_update_phy_info(unsigned long data)
3022{
3023 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003024 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003025}
3026
3027/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003028 * igb_has_link - check shared code for link and determine up/down
3029 * @adapter: pointer to driver private info
3030 **/
Nick Nunley31455352010-02-17 01:01:21 +00003031bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003032{
3033 struct e1000_hw *hw = &adapter->hw;
3034 bool link_active = false;
3035 s32 ret_val = 0;
3036
3037 /* get_link_status is set on LSC (link status) interrupt or
3038 * rx sequence error interrupt. get_link_status will stay
3039 * true until e1000_check_for_link establishes link
3040 * for copper adapters ONLY
3041 */
3042 switch (hw->phy.media_type) {
3043 case e1000_media_type_copper:
3044 if (hw->mac.get_link_status) {
3045 ret_val = hw->mac.ops.check_for_link(hw);
3046 link_active = !hw->mac.get_link_status;
3047 } else {
3048 link_active = true;
3049 }
3050 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003051 case e1000_media_type_internal_serdes:
3052 ret_val = hw->mac.ops.check_for_link(hw);
3053 link_active = hw->mac.serdes_has_link;
3054 break;
3055 default:
3056 case e1000_media_type_unknown:
3057 break;
3058 }
3059
3060 return link_active;
3061}
3062
3063/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003064 * igb_watchdog - Timer Call-back
3065 * @data: pointer to adapter cast into an unsigned long
3066 **/
3067static void igb_watchdog(unsigned long data)
3068{
3069 struct igb_adapter *adapter = (struct igb_adapter *)data;
3070 /* Do the rest outside of interrupt context */
3071 schedule_work(&adapter->watchdog_task);
3072}
3073
3074static void igb_watchdog_task(struct work_struct *work)
3075{
3076 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003077 struct igb_adapter,
3078 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003079 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003080 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003081 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003082 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003083
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003084 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003085 if (link) {
3086 if (!netif_carrier_ok(netdev)) {
3087 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003088 hw->mac.ops.get_speed_and_duplex(hw,
3089 &adapter->link_speed,
3090 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003091
3092 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003093 /* Link status message must follow this format */
3094 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003095 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003096 netdev->name,
3097 adapter->link_speed,
3098 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003099 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003100 ((ctrl & E1000_CTRL_TFCE) &&
3101 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3102 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3103 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003104
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003105 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003106 adapter->tx_timeout_factor = 1;
3107 switch (adapter->link_speed) {
3108 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003109 adapter->tx_timeout_factor = 14;
3110 break;
3111 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003112 /* maybe add some timeout factor ? */
3113 break;
3114 }
3115
3116 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003117
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003118 igb_ping_all_vfs(adapter);
3119
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003120 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003121 if (!test_bit(__IGB_DOWN, &adapter->state))
3122 mod_timer(&adapter->phy_info_timer,
3123 round_jiffies(jiffies + 2 * HZ));
3124 }
3125 } else {
3126 if (netif_carrier_ok(netdev)) {
3127 adapter->link_speed = 0;
3128 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003129 /* Link status message must follow this format */
3130 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3131 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003132 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003133
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003134 igb_ping_all_vfs(adapter);
3135
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003136 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003137 if (!test_bit(__IGB_DOWN, &adapter->state))
3138 mod_timer(&adapter->phy_info_timer,
3139 round_jiffies(jiffies + 2 * HZ));
3140 }
3141 }
3142
Auke Kok9d5c8242008-01-24 02:22:38 -08003143 igb_update_stats(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003144
Alexander Duyckdbabb062009-11-12 18:38:16 +00003145 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003146 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003147 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003148 /* We've lost link, so the controller stops DMA,
3149 * but we've got queued Tx work that's never going
3150 * to get done, so reset controller to flush Tx.
3151 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003152 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3153 adapter->tx_timeout_count++;
3154 schedule_work(&adapter->reset_task);
3155 /* return immediately since reset is imminent */
3156 return;
3157 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003158 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003159
Alexander Duyckdbabb062009-11-12 18:38:16 +00003160 /* Force detection of hung controller every watchdog period */
3161 tx_ring->detect_tx_hung = true;
3162 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003163
Auke Kok9d5c8242008-01-24 02:22:38 -08003164 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003165 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003166 u32 eics = 0;
3167 for (i = 0; i < adapter->num_q_vectors; i++) {
3168 struct igb_q_vector *q_vector = adapter->q_vector[i];
3169 eics |= q_vector->eims_value;
3170 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003171 wr32(E1000_EICS, eics);
3172 } else {
3173 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3174 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003175
Auke Kok9d5c8242008-01-24 02:22:38 -08003176 /* Reset the timer */
3177 if (!test_bit(__IGB_DOWN, &adapter->state))
3178 mod_timer(&adapter->watchdog_timer,
3179 round_jiffies(jiffies + 2 * HZ));
3180}
3181
3182enum latency_range {
3183 lowest_latency = 0,
3184 low_latency = 1,
3185 bulk_latency = 2,
3186 latency_invalid = 255
3187};
3188
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003189/**
3190 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3191 *
3192 * Stores a new ITR value based strictly on packet size. This
3193 * algorithm is less sophisticated than that used in igb_update_itr,
3194 * due to the difficulty of synchronizing statistics across multiple
3195 * receive rings. The divisors and thresholds used by this function
3196 * were determined based on theoretical maximum wire speed and testing
3197 * data, in order to minimize response time while increasing bulk
3198 * throughput.
3199 * This functionality is controlled by the InterruptThrottleRate module
3200 * parameter (see igb_param.c)
3201 * NOTE: This function is called only when operating in a multiqueue
3202 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003203 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003204 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003205static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003206{
Alexander Duyck047e0032009-10-27 15:49:27 +00003207 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003208 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003209 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -08003210
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003211 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3212 * ints/sec - an ITR timer value of 976 (~250 usec).
3213 */
3214 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003215 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003216 goto set_itr_val;
3217 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003218
3219 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3220 struct igb_ring *ring = q_vector->rx_ring;
3221 avg_wire_size = ring->total_bytes / ring->total_packets;
3222 }
3223
3224 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3225 struct igb_ring *ring = q_vector->tx_ring;
3226 avg_wire_size = max_t(u32, avg_wire_size,
3227 (ring->total_bytes /
3228 ring->total_packets));
3229 }
3230
3231 /* if avg_wire_size isn't set no work was done */
3232 if (!avg_wire_size)
3233 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003234
3235 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3236 avg_wire_size += 24;
3237
3238 /* Don't starve jumbo frames */
3239 avg_wire_size = min(avg_wire_size, 3000);
3240
3241 /* Give a little boost to mid-size frames */
3242 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3243 new_val = avg_wire_size / 3;
3244 else
3245 new_val = avg_wire_size / 2;
3246
Nick Nunleyabe1c362010-02-17 01:03:19 +00003247 /* when in itr mode 3 do not exceed 20K ints/sec */
3248 if (adapter->rx_itr_setting == 3 && new_val < 196)
3249 new_val = 196;
3250
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003251set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003252 if (new_val != q_vector->itr_val) {
3253 q_vector->itr_val = new_val;
3254 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003255 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003256clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003257 if (q_vector->rx_ring) {
3258 q_vector->rx_ring->total_bytes = 0;
3259 q_vector->rx_ring->total_packets = 0;
3260 }
3261 if (q_vector->tx_ring) {
3262 q_vector->tx_ring->total_bytes = 0;
3263 q_vector->tx_ring->total_packets = 0;
3264 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003265}
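/*
 * Worked example of the sizing heuristic above, assuming the ~250 ns
 * itr_val granularity implied by "976 == ~4000 ints/sec":
 *
 *   bulk 1500-byte frames:    1500 + 24 = 1524, no mid-size boost, so
 *     new_val = 1524 / 2 = 762  (~190 usec, ~5200 ints/sec)
 *   mid-size 400-byte frames: 400 + 24 = 424, boosted, so
 *     new_val = 424 / 3 = 141   (~35 usec, ~28000 ints/sec)
 */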
3266
3267/**
3268 * igb_update_itr - update the dynamic ITR value based on statistics
3269 * Stores a new ITR value based on packets and byte
3270 * counts during the last interrupt. The advantage of per interrupt
3271 * computation is faster updates and more accurate ITR for the current
3272 * traffic pattern. Constants in this function were computed
3273 * based on theoretical maximum wire speed and thresholds were set based
3274 * on testing data as well as attempting to minimize response time
3275 * while increasing bulk throughput.
3276 * This functionality is controlled by the InterruptThrottleRate module
3277 * parameter (see igb_param.c)
3278 * NOTE: These calculations are only valid when operating in a single-
3279 * queue environment.
3280 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003281 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003282 * @packets: the number of packets during this measurement interval
3283 * @bytes: the number of bytes during this measurement interval
3284 **/
3285static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3286 int packets, int bytes)
3287{
3288 unsigned int retval = itr_setting;
3289
3290 if (packets == 0)
3291 goto update_itr_done;
3292
3293 switch (itr_setting) {
3294 case lowest_latency:
3295 /* handle TSO and jumbo frames */
3296 if (bytes/packets > 8000)
3297 retval = bulk_latency;
3298 else if ((packets < 5) && (bytes > 512))
3299 retval = low_latency;
3300 break;
3301 case low_latency: /* 50 usec aka 20000 ints/s */
3302 if (bytes > 10000) {
3303 /* this if handles the TSO accounting */
3304 if (bytes/packets > 8000) {
3305 retval = bulk_latency;
3306 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3307 retval = bulk_latency;
3308 } else if (packets > 35) {
3309 retval = lowest_latency;
3310 }
3311 } else if (bytes/packets > 2000) {
3312 retval = bulk_latency;
3313 } else if (packets <= 2 && bytes < 512) {
3314 retval = lowest_latency;
3315 }
3316 break;
3317 case bulk_latency: /* 250 usec aka 4000 ints/s */
3318 if (bytes > 25000) {
3319 if (packets > 35)
3320 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003321 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003322 retval = low_latency;
3323 }
3324 break;
3325 }
3326
3327update_itr_done:
3328 return retval;
3329}
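/*
 * Example transitions through the ranges above (illustrative numbers):
 *   low_latency with 40 packets / 60000 bytes in the last interval:
 *     bytes > 10000 and bytes/packets == 1500 > 1200 -> bulk_latency
 *   bulk_latency with 10 packets / 1000 bytes:
 *     bytes < 1500 -> back to low_latency
 */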
3330
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003331static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003332{
Alexander Duyck047e0032009-10-27 15:49:27 +00003333 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003334 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003335 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003336
3337 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3338 if (adapter->link_speed != SPEED_1000) {
3339 current_itr = 0;
3340 new_itr = 4000;
3341 goto set_itr_now;
3342 }
3343
3344 adapter->rx_itr = igb_update_itr(adapter,
3345 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003346 q_vector->rx_ring->total_packets,
3347 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003348
Alexander Duyck047e0032009-10-27 15:49:27 +00003349 adapter->tx_itr = igb_update_itr(adapter,
3350 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003351 q_vector->tx_ring->total_packets,
3352 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003353 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003354
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003355 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003356 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003357 current_itr = low_latency;
3358
Auke Kok9d5c8242008-01-24 02:22:38 -08003359 switch (current_itr) {
3360 /* counts and packets in update_itr are dependent on these numbers */
3361 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003362 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003363 break;
3364 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003365 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003366 break;
3367 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003368 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003369 break;
3370 default:
3371 break;
3372 }
3373
3374set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003375 q_vector->rx_ring->total_bytes = 0;
3376 q_vector->rx_ring->total_packets = 0;
3377 q_vector->tx_ring->total_bytes = 0;
3378 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003379
Alexander Duyck047e0032009-10-27 15:49:27 +00003380 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003381 /* this attempts to bias the interrupt rate towards Bulk
3382 * by adding intermediate steps when interrupt rate is
3383 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003384 new_itr = new_itr > q_vector->itr_val ?
3385 max((new_itr * q_vector->itr_val) /
3386 (new_itr + (q_vector->itr_val >> 2)),
3387 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003388 new_itr;
3389 /* Don't write the value here; it resets the adapter's
3390 * internal timer, and causes us to delay far longer than
3391 * we should between interrupts. Instead, we write the ITR
3392 * value at the beginning of the next interrupt so the timing
3393 * ends up being correct.
3394 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003395 q_vector->itr_val = new_itr;
3396 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003397 }
3398
3399 return;
3400}
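/*
 * A minimal sketch of the itr_val/interrupt-rate relationship behind
 * the constants above (each itr_val unit is roughly 250 ns, so 196
 * units is ~50 usec, i.e. ~20000 ints/sec). Hypothetical helper, not
 * part of the driver:
 */
static inline u32 igb_itr_to_ints_per_sec(u32 itr_val)
{
	/* 4000000 / itr_val: 980 -> ~4000, 196 -> ~20000, 56 -> ~70000 */
	return itr_val ? 4000000 / itr_val : 0;
}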
3401
Auke Kok9d5c8242008-01-24 02:22:38 -08003402#define IGB_TX_FLAGS_CSUM 0x00000001
3403#define IGB_TX_FLAGS_VLAN 0x00000002
3404#define IGB_TX_FLAGS_TSO 0x00000004
3405#define IGB_TX_FLAGS_IPV4 0x00000008
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003406#define IGB_TX_FLAGS_TSTAMP 0x00000010
3407#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3408#define IGB_TX_FLAGS_VLAN_SHIFT 16
Auke Kok9d5c8242008-01-24 02:22:38 -08003409
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003410static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003411 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3412{
3413 struct e1000_adv_tx_context_desc *context_desc;
3414 unsigned int i;
3415 int err;
3416 struct igb_buffer *buffer_info;
3417 u32 info = 0, tu_cmd = 0;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003418 u32 mss_l4len_idx;
3419 u8 l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08003420
3421 if (skb_header_cloned(skb)) {
3422 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3423 if (err)
3424 return err;
3425 }
3426
3427 l4len = tcp_hdrlen(skb);
3428 *hdr_len += l4len;
3429
3430 if (skb->protocol == htons(ETH_P_IP)) {
3431 struct iphdr *iph = ip_hdr(skb);
3432 iph->tot_len = 0;
3433 iph->check = 0;
3434 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3435 iph->daddr, 0,
3436 IPPROTO_TCP,
3437 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003438 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003439 ipv6_hdr(skb)->payload_len = 0;
3440 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3441 &ipv6_hdr(skb)->daddr,
3442 0, IPPROTO_TCP, 0);
3443 }
3444
3445 i = tx_ring->next_to_use;
3446
3447 buffer_info = &tx_ring->buffer_info[i];
3448 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3449 /* VLAN MACLEN IPLEN */
3450 if (tx_flags & IGB_TX_FLAGS_VLAN)
3451 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3452 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3453 *hdr_len += skb_network_offset(skb);
3454 info |= skb_network_header_len(skb);
3455 *hdr_len += skb_network_header_len(skb);
3456 context_desc->vlan_macip_lens = cpu_to_le32(info);
3457
3458 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3459 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3460
3461 if (skb->protocol == htons(ETH_P_IP))
3462 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3463 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3464
3465 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3466
3467 /* MSS L4LEN IDX */
3468 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3469 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3470
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003471 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003472 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3473 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003474
3475 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3476 context_desc->seqnum_seed = 0;
3477
3478 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003479 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003480 buffer_info->dma = 0;
3481 i++;
3482 if (i == tx_ring->count)
3483 i = 0;
3484
3485 tx_ring->next_to_use = i;
3486
3487 return true;
3488}
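/*
 * Field-packing sketch for the TSO context descriptor built above,
 * assuming the usual advanced-descriptor shifts (MACLEN at bit 9,
 * L4LEN at bit 8, MSS at bit 16) and an untagged IPv4/TCP frame with
 * 14-byte MAC, 20-byte IP and 20-byte TCP headers and an MSS of 1448:
 *
 *   vlan_macip_lens = (14 << 9) | 20           = 0x00001C14
 *   mss_l4len_idx   = (1448 << 16) | (20 << 8) = 0x05A81400
 *   *hdr_len accumulates 20 + 14 + 20          = 54 bytes
 */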
3489
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003490static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3491 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003492{
3493 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck80785292009-10-27 15:51:47 +00003494 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003495 struct igb_buffer *buffer_info;
3496 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003497 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003498
3499 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3500 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3501 i = tx_ring->next_to_use;
3502 buffer_info = &tx_ring->buffer_info[i];
3503 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3504
3505 if (tx_flags & IGB_TX_FLAGS_VLAN)
3506 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003507
Auke Kok9d5c8242008-01-24 02:22:38 -08003508 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3509 if (skb->ip_summed == CHECKSUM_PARTIAL)
3510 info |= skb_network_header_len(skb);
3511
3512 context_desc->vlan_macip_lens = cpu_to_le32(info);
3513
3514 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3515
3516 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003517 __be16 protocol;
3518
3519 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3520 const struct vlan_ethhdr *vhdr =
3521 (const struct vlan_ethhdr*)skb->data;
3522
3523 protocol = vhdr->h_vlan_encapsulated_proto;
3524 } else {
3525 protocol = skb->protocol;
3526 }
3527
3528 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003529 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003530 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003531 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3532 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003533 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3534 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003535 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003536 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003537 /* XXX what about other V6 headers?? */
3538 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3539 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003540 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3541 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003542 break;
3543 default:
3544 if (unlikely(net_ratelimit()))
Alexander Duyck80785292009-10-27 15:51:47 +00003545 dev_warn(&pdev->dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003546 "partial checksum but proto=%x!\n",
3547 skb->protocol);
3548 break;
3549 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003550 }
3551
3552 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3553 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003554 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003555 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003556 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003557
3558 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003559 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003560 buffer_info->dma = 0;
3561
3562 i++;
3563 if (i == tx_ring->count)
3564 i = 0;
3565 tx_ring->next_to_use = i;
3566
3567 return true;
3568 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003569 return false;
3570}
3571
3572#define IGB_MAX_TXD_PWR 16
3573#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3574
Alexander Duyck80785292009-10-27 15:51:47 +00003575static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003576 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003577{
3578 struct igb_buffer *buffer_info;
Alexander Duyck80785292009-10-27 15:51:47 +00003579 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003580 unsigned int len = skb_headlen(skb);
3581 unsigned int count = 0, i;
3582 unsigned int f;
3583
3584 i = tx_ring->next_to_use;
3585
3586 buffer_info = &tx_ring->buffer_info[i];
3587 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3588 buffer_info->length = len;
3589 /* set time_stamp *before* dma to help avoid a possible race */
3590 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003591 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003592 buffer_info->dma = pci_map_single(pdev, skb->data, len,
3593 PCI_DMA_TODEVICE);
3594 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3595 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08003596
3597 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3598 struct skb_frag_struct *frag;
3599
Alexander Duyck85811452010-01-23 01:35:00 -08003600 count++;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003601 i++;
3602 if (i == tx_ring->count)
3603 i = 0;
3604
Auke Kok9d5c8242008-01-24 02:22:38 -08003605 frag = &skb_shinfo(skb)->frags[f];
3606 len = frag->size;
3607
3608 buffer_info = &tx_ring->buffer_info[i];
3609 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3610 buffer_info->length = len;
3611 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003612 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003613 buffer_info->mapped_as_page = true;
3614 buffer_info->dma = pci_map_page(pdev,
3615 frag->page,
3616 frag->page_offset,
3617 len,
3618 PCI_DMA_TODEVICE);
3619 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3620 goto dma_error;
3621
Auke Kok9d5c8242008-01-24 02:22:38 -08003622 }
3623
Auke Kok9d5c8242008-01-24 02:22:38 -08003624 tx_ring->buffer_info[i].skb = skb;
Nick Nunley40e90c22010-02-17 01:04:37 +00003625 tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003626 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003627
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003628 return ++count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003629
3630dma_error:
3631 dev_err(&pdev->dev, "TX DMA map failed\n");
3632
3633 /* clear timestamp and dma mappings for failed buffer_info mapping */
3634 buffer_info->dma = 0;
3635 buffer_info->time_stamp = 0;
3636 buffer_info->length = 0;
3637 buffer_info->next_to_watch = 0;
3638 buffer_info->mapped_as_page = false;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003639
3640 /* clear timestamp and dma mappings for remaining portion of packet */
Nick Nunleya77ff702010-02-17 01:06:16 +00003641 while (count--) {
3642 if (i == 0)
3643 i = tx_ring->count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003644 i--;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003645 buffer_info = &tx_ring->buffer_info[i];
3646 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3647 }
3648
3649 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003650}
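/*
 * Rewind example for the dma_error path above: with the head buffer
 * and two frags mapped and the third frag failing, count == 3 on entry
 * to dma_error. The failed slot is scrubbed by hand, then the
 * while (count--) loop walks i back three slots (wrapping through the
 * end of the ring if needed), unmapping frag 1, frag 0 and the head
 * mapping in turn.
 */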
3651
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003652static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Nick Nunley91d4ee32010-02-17 01:04:56 +00003653 u32 tx_flags, int count, u32 paylen,
Auke Kok9d5c8242008-01-24 02:22:38 -08003654 u8 hdr_len)
3655{
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003656 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003657 struct igb_buffer *buffer_info;
3658 u32 olinfo_status = 0, cmd_type_len;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003659 unsigned int i = tx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08003660
3661 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3662 E1000_ADVTXD_DCMD_DEXT);
3663
3664 if (tx_flags & IGB_TX_FLAGS_VLAN)
3665 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3666
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003667 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3668 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3669
Auke Kok9d5c8242008-01-24 02:22:38 -08003670 if (tx_flags & IGB_TX_FLAGS_TSO) {
3671 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3672
3673 /* insert tcp checksum */
3674 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3675
3676 /* insert ip checksum */
3677 if (tx_flags & IGB_TX_FLAGS_IPV4)
3678 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3679
3680 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3681 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3682 }
3683
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003684 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3685 (tx_flags & (IGB_TX_FLAGS_CSUM |
3686 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003687 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003688 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003689
3690 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3691
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003692 do {
Auke Kok9d5c8242008-01-24 02:22:38 -08003693 buffer_info = &tx_ring->buffer_info[i];
3694 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3695 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3696 tx_desc->read.cmd_type_len =
3697 cpu_to_le32(cmd_type_len | buffer_info->length);
3698 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003699 count--;
Auke Kok9d5c8242008-01-24 02:22:38 -08003700 i++;
3701 if (i == tx_ring->count)
3702 i = 0;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003703 } while (count > 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003704
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003705 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08003706 /* Force memory writes to complete before letting h/w
3707 * know there are new descriptors to fetch. (Only
3708 * applicable for weak-ordered memory model archs,
3709 * such as IA-64). */
3710 wmb();
3711
3712 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00003713 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08003714 /* we need this if more than one processor can write to our tail
3715 * at a time; it synchronizes IO on IA64/Altix systems */
3716 mmiowb();
3717}
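/*
 * olinfo example for the write-out above, assuming
 * E1000_ADVTXD_PAYLEN_SHIFT == 14: a TSO send with skb->len == 7266
 * and hdr_len == 66 encodes (7266 - 66) << 14, i.e. a 7200-byte
 * payload. The wmb() before the writel(i, tx_ring->tail) doorbell is
 * what guarantees the descriptor writes are visible to the device
 * before it starts fetching.
 */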
3718
Alexander Duycke694e962009-10-27 15:53:06 +00003719static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003720{
Alexander Duycke694e962009-10-27 15:53:06 +00003721 struct net_device *netdev = tx_ring->netdev;
3722
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003723 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003724
Auke Kok9d5c8242008-01-24 02:22:38 -08003725 /* Herbert's original patch had:
3726 * smp_mb__after_netif_stop_queue();
3727 * but since that doesn't exist yet, just open code it. */
3728 smp_mb();
3729
3730 /* We need to check again in case another CPU has just
3731 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00003732 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003733 return -EBUSY;
3734
3735 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003736 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00003737 tx_ring->tx_stats.restart_queue++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003738 return 0;
3739}
3740
Nick Nunley717ba082010-02-17 01:04:18 +00003741static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003742{
Alexander Duyckc493ea42009-03-20 00:16:50 +00003743 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003744 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00003745 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003746}
3747
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003748netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3749 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003750{
Alexander Duycke694e962009-10-27 15:53:06 +00003751 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003752 int tso = 0, count;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003753 u32 tx_flags = 0;
3754 u16 first;
3755 u8 hdr_len = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00003756 union skb_shared_tx *shtx = skb_tx(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08003757
Auke Kok9d5c8242008-01-24 02:22:38 -08003758 /* need: 1 descriptor per page,
3759 * + 2 desc gap to keep tail from touching head,
3760 * + 1 desc for skb->data,
3761 * + 1 desc for context descriptor,
3762 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00003763 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003764 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08003765 return NETDEV_TX_BUSY;
3766 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003767
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003768 if (unlikely(shtx->hardware)) {
3769 shtx->in_progress = 1;
3770 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003771 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003772
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003773 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003774 tx_flags |= IGB_TX_FLAGS_VLAN;
3775 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3776 }
3777
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003778 if (skb->protocol == htons(ETH_P_IP))
3779 tx_flags |= IGB_TX_FLAGS_IPV4;
3780
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003781 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003782 if (skb_is_gso(skb)) {
3783 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003784
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003785 if (tso < 0) {
3786 dev_kfree_skb_any(skb);
3787 return NETDEV_TX_OK;
3788 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003789 }
3790
3791 if (tso)
3792 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003793 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00003794 (skb->ip_summed == CHECKSUM_PARTIAL))
3795 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003796
Alexander Duyck65689fe2009-03-20 00:17:43 +00003797 /*
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003798 * count reflects descriptors mapped; if 0 or less then a mapping error
Alexander Duyck65689fe2009-03-20 00:17:43 +00003799 * has occurred and we need to rewind the descriptor queue
3800 */
Alexander Duyck80785292009-10-27 15:51:47 +00003801 count = igb_tx_map_adv(tx_ring, skb, first);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003802 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00003803 dev_kfree_skb_any(skb);
3804 tx_ring->buffer_info[first].time_stamp = 0;
3805 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003806 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003807 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003808
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003809 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3810
3811 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00003812 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003813
Auke Kok9d5c8242008-01-24 02:22:38 -08003814 return NETDEV_TX_OK;
3815}
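/*
 * Descriptor budgeting used by the checks above, as a hypothetical
 * helper (not part of the driver): an skb with two page frags needs
 * nr_frags + 4 == 6 free slots, and after queueing we keep
 * MAX_SKB_FRAGS + 4 slots free so a worst-case send cannot wedge the
 * queue.
 */
static inline int igb_tx_desc_needed(struct sk_buff *skb)
{
	/* 1 context + 1 for skb->data + 1 per frag + 2 gap slots */
	return skb_shinfo(skb)->nr_frags + 4;
}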
3816
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003817static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3818 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003819{
3820 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003821 struct igb_ring *tx_ring;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003822 int r_idx = 0;
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003823
3824 if (test_bit(__IGB_DOWN, &adapter->state)) {
3825 dev_kfree_skb_any(skb);
3826 return NETDEV_TX_OK;
3827 }
3828
3829 if (skb->len <= 0) {
3830 dev_kfree_skb_any(skb);
3831 return NETDEV_TX_OK;
3832 }
3833
Alexander Duyck1bfaf072009-02-19 20:39:23 -08003834 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003835 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08003836
3837 /* This goes back to the question of how to logically map a tx queue
3838 * to a flow. Right now, performance is impacted slightly negatively
3839 * if using multiple tx queues. If the stack breaks away from a
3840 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00003841 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003842}
3843
3844/**
3845 * igb_tx_timeout - Respond to a Tx Hang
3846 * @netdev: network interface device structure
3847 **/
3848static void igb_tx_timeout(struct net_device *netdev)
3849{
3850 struct igb_adapter *adapter = netdev_priv(netdev);
3851 struct e1000_hw *hw = &adapter->hw;
3852
3853 /* Do the reset outside of interrupt context */
3854 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003855
Alexander Duyck55cac242009-11-19 12:42:21 +00003856 if (hw->mac.type == e1000_82580)
3857 hw->dev_spec._82575.global_device_reset = true;
3858
Auke Kok9d5c8242008-01-24 02:22:38 -08003859 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00003860 wr32(E1000_EICS,
3861 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08003862}
3863
3864static void igb_reset_task(struct work_struct *work)
3865{
3866 struct igb_adapter *adapter;
3867 adapter = container_of(work, struct igb_adapter, reset_task);
3868
3869 igb_reinit_locked(adapter);
3870}
3871
3872/**
3873 * igb_get_stats - Get System Network Statistics
3874 * @netdev: network interface device structure
3875 *
3876 * Returns the address of the device statistics structure.
3877 * The statistics are actually updated from the timer callback.
3878 **/
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003879static struct net_device_stats *igb_get_stats(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003880{
Auke Kok9d5c8242008-01-24 02:22:38 -08003881 /* only return the current stats */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003882 return &netdev->stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08003883}
3884
3885/**
3886 * igb_change_mtu - Change the Maximum Transfer Unit
3887 * @netdev: network interface device structure
3888 * @new_mtu: new value for maximum frame size
3889 *
3890 * Returns 0 on success, negative on failure
3891 **/
3892static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3893{
3894 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00003895 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003896 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00003897 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003898
Alexander Duyckc809d222009-10-27 23:52:13 +00003899 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003900 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003901 return -EINVAL;
3902 }
3903
Auke Kok9d5c8242008-01-24 02:22:38 -08003904 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003905 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003906 return -EINVAL;
3907 }
3908
3909 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3910 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003911
Auke Kok9d5c8242008-01-24 02:22:38 -08003912 /* igb_down has a dependency on max_frame_size */
3913 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00003914
Auke Kok9d5c8242008-01-24 02:22:38 -08003915 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3916 * means we reserve 2 more; this pushes us to allocate from the next
3917 * larger slab size.
3918 * i.e. RXBUFFER_2048 --> size-4096 slab
3919 */
3920
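	/*
	 * For example (assuming the standard buffer sizes): the default
	 * MTU of 1500 gives max_frame = 1518, which selects the 1522-byte
	 * VLAN-sized buffer below, while jumbo frames fall back to the
	 * 128-byte header buffer and rely on packet split, spilling the
	 * rest of each frame into half-page fragments.
	 */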
Alexander Duyck7d95b712009-10-27 15:50:08 +00003921 if (max_frame <= IGB_RXBUFFER_1024)
Alexander Duyck4c844852009-10-27 15:52:07 +00003922 rx_buffer_len = IGB_RXBUFFER_1024;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003923 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
Alexander Duyck4c844852009-10-27 15:52:07 +00003924 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003925 else
Alexander Duyck4c844852009-10-27 15:52:07 +00003926 rx_buffer_len = IGB_RXBUFFER_128;
3927
3928 if (netif_running(netdev))
3929 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003930
Alexander Duyck090b1792009-10-27 23:51:55 +00003931 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08003932 netdev->mtu, new_mtu);
3933 netdev->mtu = new_mtu;
3934
Alexander Duyck4c844852009-10-27 15:52:07 +00003935 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003936 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
Alexander Duyck4c844852009-10-27 15:52:07 +00003937
Auke Kok9d5c8242008-01-24 02:22:38 -08003938 if (netif_running(netdev))
3939 igb_up(adapter);
3940 else
3941 igb_reset(adapter);
3942
3943 clear_bit(__IGB_RESETTING, &adapter->state);
3944
3945 return 0;
3946}
3947
3948/**
3949 * igb_update_stats - Update the board statistics counters
3950 * @adapter: board private structure
3951 **/
3953void igb_update_stats(struct igb_adapter *adapter)
3954{
Alexander Duyck128e45e2009-11-12 18:37:38 +00003955 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003956 struct e1000_hw *hw = &adapter->hw;
3957 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00003958 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003959 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003960 int i;
3961 u64 bytes, packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003962
3963#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3964
3965 /*
3966 * Prevent stats update while adapter is being reset, or if the pci
3967 * connection is down.
3968 */
3969 if (adapter->link_speed == 0)
3970 return;
3971 if (pci_channel_offline(pdev))
3972 return;
3973
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003974 bytes = 0;
3975 packets = 0;
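	/*
	 * Fold the per-ring software counters into the netdev stats; the
	 * low 12 bits of RQDPC (hence the 0x0FFF mask) hold the per-queue
	 * count of packets dropped for lack of descriptors, which feeds
	 * rx_fifo_errors.
	 */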
3976 for (i = 0; i < adapter->num_rx_queues; i++) {
3977 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00003978 struct igb_ring *ring = adapter->rx_ring[i];
3979 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00003980 net_stats->rx_fifo_errors += rqdpc_tmp;
Alexander Duyck3025a442010-02-17 01:02:39 +00003981 bytes += ring->rx_stats.bytes;
3982 packets += ring->rx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003983 }
3984
Alexander Duyck128e45e2009-11-12 18:37:38 +00003985 net_stats->rx_bytes = bytes;
3986 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003987
3988 bytes = 0;
3989 packets = 0;
3990 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003991 struct igb_ring *ring = adapter->tx_ring[i];
3992 bytes += ring->tx_stats.bytes;
3993 packets += ring->tx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003994 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00003995 net_stats->tx_bytes = bytes;
3996 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003997
3998 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08003999 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4000 adapter->stats.gprc += rd32(E1000_GPRC);
4001 adapter->stats.gorc += rd32(E1000_GORCL);
4002 rd32(E1000_GORCH); /* clear GORCL */
4003 adapter->stats.bprc += rd32(E1000_BPRC);
4004 adapter->stats.mprc += rd32(E1000_MPRC);
4005 adapter->stats.roc += rd32(E1000_ROC);
4006
4007 adapter->stats.prc64 += rd32(E1000_PRC64);
4008 adapter->stats.prc127 += rd32(E1000_PRC127);
4009 adapter->stats.prc255 += rd32(E1000_PRC255);
4010 adapter->stats.prc511 += rd32(E1000_PRC511);
4011 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4012 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4013 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4014 adapter->stats.sec += rd32(E1000_SEC);
4015
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004016 mpc = rd32(E1000_MPC);
4017 adapter->stats.mpc += mpc;
4018 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004019 adapter->stats.scc += rd32(E1000_SCC);
4020 adapter->stats.ecol += rd32(E1000_ECOL);
4021 adapter->stats.mcc += rd32(E1000_MCC);
4022 adapter->stats.latecol += rd32(E1000_LATECOL);
4023 adapter->stats.dc += rd32(E1000_DC);
4024 adapter->stats.rlec += rd32(E1000_RLEC);
4025 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4026 adapter->stats.xontxc += rd32(E1000_XONTXC);
4027 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4028 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4029 adapter->stats.fcruc += rd32(E1000_FCRUC);
4030 adapter->stats.gptc += rd32(E1000_GPTC);
4031 adapter->stats.gotc += rd32(E1000_GOTCL);
4032 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004033 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004034 adapter->stats.ruc += rd32(E1000_RUC);
4035 adapter->stats.rfc += rd32(E1000_RFC);
4036 adapter->stats.rjc += rd32(E1000_RJC);
4037 adapter->stats.tor += rd32(E1000_TORH);
4038 adapter->stats.tot += rd32(E1000_TOTH);
4039 adapter->stats.tpr += rd32(E1000_TPR);
4040
4041 adapter->stats.ptc64 += rd32(E1000_PTC64);
4042 adapter->stats.ptc127 += rd32(E1000_PTC127);
4043 adapter->stats.ptc255 += rd32(E1000_PTC255);
4044 adapter->stats.ptc511 += rd32(E1000_PTC511);
4045 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4046 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4047
4048 adapter->stats.mptc += rd32(E1000_MPTC);
4049 adapter->stats.bptc += rd32(E1000_BPTC);
4050
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004051 adapter->stats.tpt += rd32(E1000_TPT);
4052 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004053
4054 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004055 /* read internal phy specific stats */
4056 reg = rd32(E1000_CTRL_EXT);
4057 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4058 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4059 adapter->stats.tncrs += rd32(E1000_TNCRS);
4060 }
4061
Auke Kok9d5c8242008-01-24 02:22:38 -08004062 adapter->stats.tsctc += rd32(E1000_TSCTC);
4063 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4064
4065 adapter->stats.iac += rd32(E1000_IAC);
4066 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4067 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4068 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4069 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4070 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4071 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4072 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4073 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4074
4075 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004076 net_stats->multicast = adapter->stats.mprc;
4077 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004078
4079 /* Rx Errors */
4080
4081 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004082 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004083 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004084 adapter->stats.crcerrs + adapter->stats.algnerrc +
4085 adapter->stats.ruc + adapter->stats.roc +
4086 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004087 net_stats->rx_length_errors = adapter->stats.ruc +
4088 adapter->stats.roc;
4089 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4090 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4091 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004092
4093 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004094 net_stats->tx_errors = adapter->stats.ecol +
4095 adapter->stats.latecol;
4096 net_stats->tx_aborted_errors = adapter->stats.ecol;
4097 net_stats->tx_window_errors = adapter->stats.latecol;
4098 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004099
4100 /* Tx Dropped needs to be maintained elsewhere */
4101
4102 /* Phy Stats */
4103 if (hw->phy.media_type == e1000_media_type_copper) {
4104 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004105 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004106 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4107 adapter->phy_stats.idle_errors += phy_tmp;
4108 }
4109 }
4110
4111 /* Management Stats */
4112 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4113 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4114 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4115}
4116
Auke Kok9d5c8242008-01-24 02:22:38 -08004117static irqreturn_t igb_msix_other(int irq, void *data)
4118{
Alexander Duyck047e0032009-10-27 15:49:27 +00004119 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004120 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004121 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004122 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004123
Alexander Duyck7f081d42010-01-07 17:41:00 +00004124 if (icr & E1000_ICR_DRSTA)
4125 schedule_work(&adapter->reset_task);
4126
Alexander Duyck047e0032009-10-27 15:49:27 +00004127 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004128 /* HW is reporting DMA is out of sync */
4129 adapter->stats.doosync++;
4130 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004131
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004132 /* Check for a mailbox event */
4133 if (icr & E1000_ICR_VMMB)
4134 igb_msg_task(adapter);
4135
4136 if (icr & E1000_ICR_LSC) {
4137 hw->mac.get_link_status = 1;
4138 /* guard against interrupt when we're going down */
4139 if (!test_bit(__IGB_DOWN, &adapter->state))
4140 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4141 }
4142
Alexander Duyck25568a52009-10-27 23:49:59 +00004143 if (adapter->vfs_allocated_count)
4144 wr32(E1000_IMS, E1000_IMS_LSC |
4145 E1000_IMS_VMMB |
4146 E1000_IMS_DOUTSYNC);
4147 else
4148 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004149 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004150
4151 return IRQ_HANDLED;
4152}
4153
Alexander Duyck047e0032009-10-27 15:49:27 +00004154static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004155{
Alexander Duyck26b39272010-02-17 01:00:41 +00004156 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004157 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004158
Alexander Duyck047e0032009-10-27 15:49:27 +00004159 if (!q_vector->set_itr)
4160 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004161
Alexander Duyck047e0032009-10-27 15:49:27 +00004162 if (!itr_val)
4163 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004164
Alexander Duyck26b39272010-02-17 01:00:41 +00004165 if (adapter->hw.mac.type == e1000_82575)
4166 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004167 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004168 itr_val |= 0x8000000;
4169
4170 writel(itr_val, q_vector->itr_register);
4171 q_vector->set_itr = 0;
4172}
4173
4174static irqreturn_t igb_msix_ring(int irq, void *data)
4175{
4176 struct igb_q_vector *q_vector = data;
4177
4178 /* Write the ITR value calculated from the previous interrupt. */
4179 igb_write_itr(q_vector);
4180
4181 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004182
Auke Kok9d5c8242008-01-24 02:22:38 -08004183 return IRQ_HANDLED;
4184}
4185
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004186#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004187static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004188{
Alexander Duyck047e0032009-10-27 15:49:27 +00004189 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004190 struct e1000_hw *hw = &adapter->hw;
4191 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004192
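	/*
	 * DCA (Direct Cache Access) tags this queue's descriptor traffic
	 * with the CPU servicing it so the chipset can steer those writes
	 * toward that CPU's cache; the registers only need rewriting when
	 * the servicing CPU changes.
	 */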
Alexander Duyck047e0032009-10-27 15:49:27 +00004193 if (q_vector->cpu == cpu)
4194 goto out_no_update;
4195
4196 if (q_vector->tx_ring) {
4197 int q = q_vector->tx_ring->reg_idx;
4198 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4199 if (hw->mac.type == e1000_82575) {
4200 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4201 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4202 } else {
4203 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4204 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4205 E1000_DCA_TXCTRL_CPUID_SHIFT;
4206 }
4207 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4208 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4209 }
4210 if (q_vector->rx_ring) {
4211 int q = q_vector->rx_ring->reg_idx;
4212 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4213 if (hw->mac.type == e1000_82575) {
4214 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4215 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4216 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004217 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004218 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004219 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004220 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004221 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4222 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4223 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4224 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004225 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004226 q_vector->cpu = cpu;
4227out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004228 put_cpu();
4229}
4230
4231static void igb_setup_dca(struct igb_adapter *adapter)
4232{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004233 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004234 int i;
4235
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004236 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004237 return;
4238
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004239 /* Always use CB2 mode, difference is masked in the CB driver. */
4240 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4241
Alexander Duyck047e0032009-10-27 15:49:27 +00004242 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004243 adapter->q_vector[i]->cpu = -1;
4244 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004245 }
4246}
4247
4248static int __igb_notify_dca(struct device *dev, void *data)
4249{
4250 struct net_device *netdev = dev_get_drvdata(dev);
4251 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004252 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004253 struct e1000_hw *hw = &adapter->hw;
4254 unsigned long event = *(unsigned long *)data;
4255
4256 switch (event) {
4257 case DCA_PROVIDER_ADD:
4258 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004259 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004260 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004261 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004262 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004263 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004264 igb_setup_dca(adapter);
4265 break;
4266 }
4267 /* Fall Through since DCA is disabled. */
4268 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004269 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004270 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004271 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004272 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004273 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004274 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004275 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004276 }
4277 break;
4278 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004279
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004280 return 0;
4281}
4282
4283static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4284 void *p)
4285{
4286 int ret_val;
4287
4288 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4289 __igb_notify_dca);
4290
4291 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4292}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004293#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004294
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004295static void igb_ping_all_vfs(struct igb_adapter *adapter)
4296{
4297 struct e1000_hw *hw = &adapter->hw;
4298 u32 ping;
4299 int i;
4300
4301 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4302 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004303 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004304 ping |= E1000_VT_MSGTYPE_CTS;
4305 igb_write_mbx(hw, &ping, 1, i);
4306 }
4307}
4308
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004309static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4310{
4311 struct e1000_hw *hw = &adapter->hw;
4312 u32 vmolr = rd32(E1000_VMOLR(vf));
4313 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4314
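	/* start clean: revoke any promiscuous modes previously granted */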
	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
4317 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4318
4319 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4320 vmolr |= E1000_VMOLR_MPME;
4321 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4322 } else {
4323 /*
4324 * if we have hashes and we are clearing a multicast promisc
4325 * flag we need to write the hashes to the MTA as this step
4326 * was previously skipped
4327 */
4328 if (vf_data->num_vf_mc_hashes > 30) {
4329 vmolr |= E1000_VMOLR_MPME;
4330 } else if (vf_data->num_vf_mc_hashes) {
4331 int j;
4332 vmolr |= E1000_VMOLR_ROMPE;
4333 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4334 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4335 }
4336 }
4337
4338 wr32(E1000_VMOLR(vf), vmolr);
4339
	/* any flags left unprocessed indicate an unsupported request */
4341 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4342 return -EINVAL;
4343
	return 0;
}
4347
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004348static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4349 u32 *msgbuf, u32 vf)
4350{
4351 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4352 u16 *hash_list = (u16 *)&msgbuf[1];
4353 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4354 int i;
4355
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004356 /* salt away the number of multicast addresses assigned
	 * to this VF so it can be restored when the PF multicast
	 * list changes
4359 */
4360 vf_data->num_vf_mc_hashes = n;
4361
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004362 /* only up to 30 hash values supported */
4363 if (n > 30)
4364 n = 30;
4365
4366 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004367 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07004368 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004369
4370 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004371 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004372
4373 return 0;
4374}
4375
4376static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4377{
4378 struct e1000_hw *hw = &adapter->hw;
4379 struct vf_data_storage *vf_data;
4380 int i, j;
4381
4382 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004383 u32 vmolr = rd32(E1000_VMOLR(i));
4384 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4385
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004386 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004387
4388 if ((vf_data->num_vf_mc_hashes > 30) ||
4389 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4390 vmolr |= E1000_VMOLR_MPME;
4391 } else if (vf_data->num_vf_mc_hashes) {
4392 vmolr |= E1000_VMOLR_ROMPE;
4393 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4394 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4395 }
4396 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004397 }
4398}
4399
4400static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4401{
4402 struct e1000_hw *hw = &adapter->hw;
4403 u32 pool_mask, reg, vid;
4404 int i;
4405
4406 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4407
4408 /* Find the vlan filter for this id */
4409 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4410 reg = rd32(E1000_VLVF(i));
4411
4412 /* remove the vf from the pool */
4413 reg &= ~pool_mask;
4414
4415 /* if pool is empty then remove entry from vfta */
4416 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4417 (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
4420 igb_vfta_set(hw, vid, false);
4421 }
4422
4423 wr32(E1000_VLVF(i), reg);
4424 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004425
4426 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004427}
4428
4429static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4430{
4431 struct e1000_hw *hw = &adapter->hw;
4432 u32 reg, i;
4433
Alexander Duyck51466232009-10-27 23:47:35 +00004434 /* The vlvf table only exists on 82576 hardware and newer */
4435 if (hw->mac.type < e1000_82576)
4436 return -1;
4437
4438 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004439 if (!adapter->vfs_allocated_count)
4440 return -1;
4441
4442 /* Find the vlan filter for this id */
4443 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4444 reg = rd32(E1000_VLVF(i));
4445 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4446 vid == (reg & E1000_VLVF_VLANID_MASK))
4447 break;
4448 }
4449
4450 if (add) {
4451 if (i == E1000_VLVF_ARRAY_SIZE) {
4452 /* Did not find a matching VLAN ID entry that was
4453 * enabled. Search for a free filter entry, i.e.
4454 * one without the enable bit set
4455 */
4456 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4457 reg = rd32(E1000_VLVF(i));
4458 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4459 break;
4460 }
4461 }
4462 if (i < E1000_VLVF_ARRAY_SIZE) {
4463 /* Found an enabled/available entry */
4464 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4465
4466 /* if !enabled we need to set this up in vfta */
4467 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00004468 /* add VID to filter table */
4469 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004470 reg |= E1000_VLVF_VLANID_ENABLE;
4471 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00004472 reg &= ~E1000_VLVF_VLANID_MASK;
4473 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004474 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004475
4476 /* do not modify RLPML for PF devices */
4477 if (vf >= adapter->vfs_allocated_count)
4478 return 0;
4479
4480 if (!adapter->vf_data[vf].vlans_enabled) {
4481 u32 size;
4482 reg = rd32(E1000_VMOLR(vf));
4483 size = reg & E1000_VMOLR_RLPML_MASK;
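				/* make room for the 4-byte VLAN tag */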
4484 size += 4;
4485 reg &= ~E1000_VMOLR_RLPML_MASK;
4486 reg |= size;
4487 wr32(E1000_VMOLR(vf), reg);
4488 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004489
Alexander Duyck51466232009-10-27 23:47:35 +00004490 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004491 return 0;
4492 }
4493 } else {
4494 if (i < E1000_VLVF_ARRAY_SIZE) {
4495 /* remove vf from the pool */
4496 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4497 /* if pool is empty then remove entry from vfta */
4498 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4499 reg = 0;
4500 igb_vfta_set(hw, vid, false);
4501 }
4502 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004503
4504 /* do not modify RLPML for PF devices */
4505 if (vf >= adapter->vfs_allocated_count)
4506 return 0;
4507
4508 adapter->vf_data[vf].vlans_enabled--;
4509 if (!adapter->vf_data[vf].vlans_enabled) {
4510 u32 size;
4511 reg = rd32(E1000_VMOLR(vf));
4512 size = reg & E1000_VMOLR_RLPML_MASK;
4513 size -= 4;
4514 reg &= ~E1000_VMOLR_RLPML_MASK;
4515 reg |= size;
4516 wr32(E1000_VMOLR(vf), reg);
4517 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004518 }
4519 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00004520 return 0;
4521}
4522
4523static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4524{
4525 struct e1000_hw *hw = &adapter->hw;
4526
4527 if (vid)
4528 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
4529 else
4530 wr32(E1000_VMVIR(vf), 0);
4531}
4532
4533static int igb_ndo_set_vf_vlan(struct net_device *netdev,
4534 int vf, u16 vlan, u8 qos)
4535{
4536 int err = 0;
4537 struct igb_adapter *adapter = netdev_priv(netdev);
4538
4539 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
4540 return -EINVAL;
4541 if (vlan || qos) {
4542 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
4543 if (err)
4544 goto out;
4545 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
4546 igb_set_vmolr(adapter, vf, !vlan);
4547 adapter->vf_data[vf].pf_vlan = vlan;
4548 adapter->vf_data[vf].pf_qos = qos;
4549 dev_info(&adapter->pdev->dev,
4550 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
4551 if (test_bit(__IGB_DOWN, &adapter->state)) {
4552 dev_warn(&adapter->pdev->dev,
4553 "The VF VLAN has been set,"
4554 " but the PF device is not up.\n");
4555 dev_warn(&adapter->pdev->dev,
4556 "Bring the PF device up before"
4557 " attempting to use the VF device.\n");
4558 }
4559 } else {
4560 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
4561 false, vf);
4562 igb_set_vmvir(adapter, vlan, vf);
4563 igb_set_vmolr(adapter, vf, true);
4564 adapter->vf_data[vf].pf_vlan = 0;
4565 adapter->vf_data[vf].pf_qos = 0;
4566 }
4567out:
4568 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004569}
4570
4571static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4572{
4573 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4574 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4575
4576 return igb_vlvf_set(adapter, vid, add, vf);
4577}
4578
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004579static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004580{
	/* clear flags, but remember whether the PF assigned this VF's MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004583 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004584
4585 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004586 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004587
4588 /* reset vlans for device */
4589 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00004590 if (adapter->vf_data[vf].pf_vlan)
4591 igb_ndo_set_vf_vlan(adapter->netdev, vf,
4592 adapter->vf_data[vf].pf_vlan,
4593 adapter->vf_data[vf].pf_qos);
4594 else
4595 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004596
4597 /* reset multicast table array for vf */
4598 adapter->vf_data[vf].num_vf_mc_hashes = 0;
4599
4600 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004601 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004602}
4603
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004604static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4605{
4606 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4607
4608 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004609 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
4610 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004611
4612 /* process remaining reset events */
4613 igb_vf_reset(adapter, vf);
4614}
4615
4616static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004617{
4618 struct e1000_hw *hw = &adapter->hw;
4619 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004620 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004621 u32 reg, msgbuf[3];
4622 u8 *addr = (u8 *)(&msgbuf[1]);
4623
4624 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004625 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004626
4627 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00004628 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004629
4630 /* enable transmit and receive for vf */
4631 reg = rd32(E1000_VFTE);
4632 wr32(E1000_VFTE, reg | (1 << vf));
4633 reg = rd32(E1000_VFRE);
4634 wr32(E1000_VFRE, reg | (1 << vf));
4635
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004637
4638 /* reply to reset with ack and vf mac address */
4639 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4640 memcpy(addr, vf_mac, 6);
4641 igb_write_mbx(hw, msgbuf, 3, vf);
4642}
4643
4644static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4645{
	unsigned char *addr = (unsigned char *)&msg[1];
4647 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004648
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004649 if (is_valid_ether_addr(addr))
4650 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004651
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004652 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004653}
4654
4655static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4656{
4657 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004658 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004659 u32 msg = E1000_VT_MSGTYPE_NACK;
4660
4661 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004662 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4663 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004664 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004665 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004666 }
4667}
4668
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004669static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004670{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004671 struct pci_dev *pdev = adapter->pdev;
4672 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004673 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004674 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004675 s32 retval;
4676
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004677 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004678
Alexander Duyckfef45f42009-12-11 22:57:34 -08004679 if (retval) {
4680 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004681 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08004682 vf_data->flags &= ~IGB_VF_FLAG_CTS;
4683 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4684 return;
4685 goto out;
4686 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004687
4688 /* this is a message we already processed, do nothing */
4689 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004690 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004691
4692 /*
4693 * until the vf completes a reset it should not be
4694 * allowed to start any configuration.
4695 */
4696
4697 if (msgbuf[0] == E1000_VF_RESET) {
4698 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004699 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004700 }
4701
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004702 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08004703 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4704 return;
4705 retval = -1;
4706 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004707 }
4708
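	/*
	 * The low 16 bits of msgbuf[0] select the command; per-command
	 * arguments travel in the upper E1000_VT_MSGINFO bits and in the
	 * mailbox words that follow.
	 */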
4709 switch ((msgbuf[0] & 0xFFFF)) {
4710 case E1000_VF_SET_MAC_ADDR:
4711 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4712 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004713 case E1000_VF_SET_PROMISC:
4714 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4715 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004716 case E1000_VF_SET_MULTICAST:
4717 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4718 break;
4719 case E1000_VF_SET_LPE:
4720 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4721 break;
4722 case E1000_VF_SET_VLAN:
Williams, Mitch A8151d292010-02-10 01:44:24 +00004723 if (adapter->vf_data[vf].pf_vlan)
4724 retval = -1;
4725 else
4726 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004727 break;
4728 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00004729 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004730 retval = -1;
4731 break;
4732 }
4733
Alexander Duyckfef45f42009-12-11 22:57:34 -08004734 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4735out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004736 /* notify the VF of the results of what it sent us */
4737 if (retval)
4738 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4739 else
4740 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4741
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004742 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004743}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004744
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004745static void igb_msg_task(struct igb_adapter *adapter)
4746{
4747 struct e1000_hw *hw = &adapter->hw;
4748 u32 vf;
4749
4750 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4751 /* process any reset requests */
4752 if (!igb_check_for_rst(hw, vf))
4753 igb_vf_reset_event(adapter, vf);
4754
4755 /* process any messages pending */
4756 if (!igb_check_for_msg(hw, vf))
4757 igb_rcv_msg_from_vf(adapter, vf);
4758
4759 /* process any acks */
4760 if (!igb_check_for_ack(hw, vf))
4761 igb_rcv_ack_from_vf(adapter, vf);
4762 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004763}
4764
Auke Kok9d5c8242008-01-24 02:22:38 -08004765/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00004766 * igb_set_uta - Set unicast filter table address
4767 * @adapter: board private structure
4768 *
4769 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
4774 **/
4775static void igb_set_uta(struct igb_adapter *adapter)
4776{
4777 struct e1000_hw *hw = &adapter->hw;
4778 int i;
4779
4780 /* The UTA table only exists on 82576 hardware and newer */
4781 if (hw->mac.type < e1000_82576)
4782 return;
4783
4784 /* we only need to do this if VMDq is enabled */
4785 if (!adapter->vfs_allocated_count)
4786 return;
4787
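	/* with every UTA bit set, VMOLR ROPE acts as a unicast promiscuous enable */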
4788 for (i = 0; i < hw->mac.uta_reg_count; i++)
4789 array_wr32(E1000_UTA, i, ~0);
4790}
4791
4792/**
Auke Kok9d5c8242008-01-24 02:22:38 -08004793 * igb_intr_msi - Interrupt Handler
4794 * @irq: interrupt number
4795 * @data: pointer to a network interface device structure
4796 **/
4797static irqreturn_t igb_intr_msi(int irq, void *data)
4798{
Alexander Duyck047e0032009-10-27 15:49:27 +00004799 struct igb_adapter *adapter = data;
4800 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004801 struct e1000_hw *hw = &adapter->hw;
4802 /* read ICR disables interrupts using IAM */
4803 u32 icr = rd32(E1000_ICR);
4804
Alexander Duyck047e0032009-10-27 15:49:27 +00004805 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004806
Alexander Duyck7f081d42010-01-07 17:41:00 +00004807 if (icr & E1000_ICR_DRSTA)
4808 schedule_work(&adapter->reset_task);
4809
Alexander Duyck047e0032009-10-27 15:49:27 +00004810 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004811 /* HW is reporting DMA is out of sync */
4812 adapter->stats.doosync++;
4813 }
4814
Auke Kok9d5c8242008-01-24 02:22:38 -08004815 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4816 hw->mac.get_link_status = 1;
4817 if (!test_bit(__IGB_DOWN, &adapter->state))
4818 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4819 }
4820
Alexander Duyck047e0032009-10-27 15:49:27 +00004821 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004822
4823 return IRQ_HANDLED;
4824}
4825
4826/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00004827 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08004828 * @irq: interrupt number
4829 * @data: pointer to a network interface device structure
4830 **/
4831static irqreturn_t igb_intr(int irq, void *data)
4832{
Alexander Duyck047e0032009-10-27 15:49:27 +00004833 struct igb_adapter *adapter = data;
4834 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004835 struct e1000_hw *hw = &adapter->hw;
4836 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4837 * need for the IMC write */
4838 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08004839 if (!icr)
4840 return IRQ_NONE; /* Not our interrupt */
4841
Alexander Duyck047e0032009-10-27 15:49:27 +00004842 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004843
4844 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4845 * not set, then the adapter didn't send an interrupt */
4846 if (!(icr & E1000_ICR_INT_ASSERTED))
4847 return IRQ_NONE;
4848
Alexander Duyck7f081d42010-01-07 17:41:00 +00004849 if (icr & E1000_ICR_DRSTA)
4850 schedule_work(&adapter->reset_task);
4851
Alexander Duyck047e0032009-10-27 15:49:27 +00004852 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004853 /* HW is reporting DMA is out of sync */
4854 adapter->stats.doosync++;
4855 }
4856
Auke Kok9d5c8242008-01-24 02:22:38 -08004857 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4858 hw->mac.get_link_status = 1;
4859 /* guard against interrupt when we're going down */
4860 if (!test_bit(__IGB_DOWN, &adapter->state))
4861 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4862 }
4863
Alexander Duyck047e0032009-10-27 15:49:27 +00004864 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004865
4866 return IRQ_HANDLED;
4867}
4868
Alexander Duyck047e0032009-10-27 15:49:27 +00004869static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08004870{
Alexander Duyck047e0032009-10-27 15:49:27 +00004871 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08004872 struct e1000_hw *hw = &adapter->hw;
4873
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00004874 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4875 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
Alexander Duyck047e0032009-10-27 15:49:27 +00004876 if (!adapter->msix_entries)
Alexander Duyck46544252009-02-19 20:39:04 -08004877 igb_set_itr(adapter);
4878 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004879 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004880 }
4881
4882 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4883 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00004884 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08004885 else
4886 igb_irq_enable(adapter);
4887 }
4888}
4889
Auke Kok9d5c8242008-01-24 02:22:38 -08004890/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004891 * igb_poll - NAPI Rx polling callback
4892 * @napi: napi polling structure
4893 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08004894 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004895static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08004896{
Alexander Duyck047e0032009-10-27 15:49:27 +00004897 struct igb_q_vector *q_vector = container_of(napi,
4898 struct igb_q_vector,
4899 napi);
4900 int tx_clean_complete = 1, work_done = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004901
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004902#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004903 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4904 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004905#endif
Alexander Duyck047e0032009-10-27 15:49:27 +00004906 if (q_vector->tx_ring)
4907 tx_clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004908
Alexander Duyck047e0032009-10-27 15:49:27 +00004909 if (q_vector->rx_ring)
4910 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4911
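	/* claiming the full budget keeps NAPI polling until Tx cleanup completes */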
4912 if (!tx_clean_complete)
4913 work_done = budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08004914
Alexander Duyck46544252009-02-19 20:39:04 -08004915 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck5e6d5b12009-03-13 20:40:38 +00004916 if (work_done < budget) {
Alexander Duyck46544252009-02-19 20:39:04 -08004917 napi_complete(napi);
Alexander Duyck047e0032009-10-27 15:49:27 +00004918 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004919 }
4920
4921 return work_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08004922}
Al Viro6d8126f2008-03-16 22:23:24 +00004923
Auke Kok9d5c8242008-01-24 02:22:38 -08004924/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004925 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004926 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004927 * @shhwtstamps: timestamp structure to update
4928 * @regval: unsigned 64bit system time value.
4929 *
4930 * We need to convert the system time value stored in the RX/TXSTMP registers
4931 * into a hwtstamp which can be used by the upper level timestamping functions
4932 */
4933static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4934 struct skb_shared_hwtstamps *shhwtstamps,
4935 u64 regval)
4936{
4937 u64 ns;
4938
Alexander Duyck55cac242009-11-19 12:42:21 +00004939 /*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL; shift it up by
	 * 24 bits (IGB_82580_TSYNC_SHIFT) to match the clock shift we set
	 * up earlier.
4942 */
4943 if (adapter->hw.mac.type == e1000_82580)
4944 regval <<= IGB_82580_TSYNC_SHIFT;
4945
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004946 ns = timecounter_cyc2time(&adapter->clock, regval);
4947 timecompare_update(&adapter->compare, ns);
4948 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4949 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4950 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4951}
4952
4953/**
4954 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4955 * @q_vector: pointer to q_vector containing needed info
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004956 * @skb: packet that was just sent
4957 *
4958 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
4961 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004962static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004963{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004964 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004965 union skb_shared_tx *shtx = skb_tx(skb);
4966 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004967 struct skb_shared_hwtstamps shhwtstamps;
4968 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004969
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004970 /* if skb does not support hw timestamp or TX stamp not valid exit */
4971 if (likely(!shtx->hardware) ||
4972 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4973 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004974
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004975 regval = rd32(E1000_TXSTMPL);
4976 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4977
4978 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4979 skb_tstamp_tx(skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004980}
4981
4982/**
Auke Kok9d5c8242008-01-24 02:22:38 -08004983 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00004984 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08004985 * returns true if ring is completely cleaned
4986 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00004987static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004988{
Alexander Duyck047e0032009-10-27 15:49:27 +00004989 struct igb_adapter *adapter = q_vector->adapter;
4990 struct igb_ring *tx_ring = q_vector->tx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00004991 struct net_device *netdev = tx_ring->netdev;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004992 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08004993 struct igb_buffer *buffer_info;
4994 struct sk_buff *skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004995 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004996 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004997 unsigned int i, eop, count = 0;
4998 bool cleaned = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08004999
Auke Kok9d5c8242008-01-24 02:22:38 -08005000 i = tx_ring->next_to_clean;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005001 eop = tx_ring->buffer_info[i].next_to_watch;
5002 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5003
5004 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
5005 (count < tx_ring->count)) {
5006 for (cleaned = false; !cleaned; count++) {
5007 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005008 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005009 cleaned = (i == eop);
Auke Kok9d5c8242008-01-24 02:22:38 -08005010 skb = buffer_info->skb;
5011
5012 if (skb) {
5013 unsigned int segs, bytecount;
5014 /* gso_segs is currently only valid for tcp */
Nick Nunley40e90c22010-02-17 01:04:37 +00005015 segs = buffer_info->gso_segs;
Auke Kok9d5c8242008-01-24 02:22:38 -08005016 /* multiply data chunks by size of headers */
5017 bytecount = ((segs - 1) * skb_headlen(skb)) +
5018 skb->len;
5019 total_packets += segs;
5020 total_bytes += bytecount;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005021
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005022 igb_tx_hwtstamp(q_vector, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005023 }
5024
Alexander Duyck80785292009-10-27 15:51:47 +00005025 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005026 tx_desc->wb.status = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005027
5028 i++;
5029 if (i == tx_ring->count)
5030 i = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005031 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005032 eop = tx_ring->buffer_info[i].next_to_watch;
5033 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5034 }
5035
Auke Kok9d5c8242008-01-24 02:22:38 -08005036 tx_ring->next_to_clean = i;
5037
Alexander Duyckfc7d3452008-08-26 04:25:08 -07005038 if (unlikely(count &&
Auke Kok9d5c8242008-01-24 02:22:38 -08005039 netif_carrier_ok(netdev) &&
Alexander Duyckc493ea42009-03-20 00:16:50 +00005040 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005041 /* Make sure that anybody stopping the queue after this
5042 * sees the new next_to_clean.
5043 */
5044 smp_mb();
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005045 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5046 !(test_bit(__IGB_DOWN, &adapter->state))) {
5047 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005048 tx_ring->tx_stats.restart_queue++;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005049 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005050 }
5051
5052 if (tx_ring->detect_tx_hung) {
5053 /* Detect a transmit hang in hardware, this serializes the
5054 * check with the clearing of time_stamp and movement of i */
5055 tx_ring->detect_tx_hung = false;
5056 if (tx_ring->buffer_info[i].time_stamp &&
5057 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005058 (adapter->tx_timeout_factor * HZ)) &&
5059 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005060
Auke Kok9d5c8242008-01-24 02:22:38 -08005061 /* detected Tx unit hang */
Alexander Duyck80785292009-10-27 15:51:47 +00005062 dev_err(&tx_ring->pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005063 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005064 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005065 " TDH <%x>\n"
5066 " TDT <%x>\n"
5067 " next_to_use <%x>\n"
5068 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005069 "buffer_info[next_to_clean]\n"
5070 " time_stamp <%lx>\n"
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005071 " next_to_watch <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005072 " jiffies <%lx>\n"
5073 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005074 tx_ring->queue_index,
Alexander Duyckfce99e32009-10-27 15:51:27 +00005075 readl(tx_ring->head),
5076 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005077 tx_ring->next_to_use,
5078 tx_ring->next_to_clean,
Alexander Duyckf7ba2052009-10-27 23:48:51 +00005079 tx_ring->buffer_info[eop].time_stamp,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005080 eop,
Auke Kok9d5c8242008-01-24 02:22:38 -08005081 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005082 eop_desc->wb.status);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005083 netif_stop_subqueue(netdev, tx_ring->queue_index);
Auke Kok9d5c8242008-01-24 02:22:38 -08005084 }
5085 }
5086 tx_ring->total_bytes += total_bytes;
5087 tx_ring->total_packets += total_packets;
Alexander Duycke21ed352008-07-08 15:07:24 -07005088 tx_ring->tx_stats.bytes += total_bytes;
5089 tx_ring->tx_stats.packets += total_packets;
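	/* returning false (a full ring cleaned) tells igb_poll Tx work may remain */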
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005090 return (count < tx_ring->count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005091}
5092
Auke Kok9d5c8242008-01-24 02:22:38 -08005093/**
5094 * igb_receive_skb - helper function to handle rx indications
Alexander Duyck047e0032009-10-27 15:49:27 +00005095 * @q_vector: structure containing interrupt and ring information
5096 * @skb: packet to send up
5097 * @vlan_tag: vlan tag for packet
Auke Kok9d5c8242008-01-24 02:22:38 -08005098 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005099static void igb_receive_skb(struct igb_q_vector *q_vector,
5100 struct sk_buff *skb,
5101 u16 vlan_tag)
Auke Kok9d5c8242008-01-24 02:22:38 -08005102{
Alexander Duyck047e0032009-10-27 15:49:27 +00005103 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyckd3352522008-07-08 15:12:13 -07005104
Alexander Duyck047e0032009-10-27 15:49:27 +00005105 if (vlan_tag)
5106 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5107 vlan_tag, skb);
Alexander Duyck182ff8d2009-04-27 22:35:33 +00005108 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005109 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005110}
5111
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005112static inline void igb_rx_checksum_adv(struct igb_ring *ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08005113 u32 status_err, struct sk_buff *skb)
5114{
5115 skb->ip_summed = CHECKSUM_NONE;
5116
5117 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005118 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5119 (status_err & E1000_RXD_STAT_IXSM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005120 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005121
Auke Kok9d5c8242008-01-24 02:22:38 -08005122 /* TCP/UDP checksum error bit is set */
5123 if (status_err &
5124 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005125 /*
5126 * work around errata with sctp packets where the TCPE aka
5127 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5128 * packets, (aka let the stack check the crc32c)
5129 */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005130 if ((skb->len == 60) &&
5131 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005132 ring->rx_stats.csum_err++;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005133
Auke Kok9d5c8242008-01-24 02:22:38 -08005134 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005135 return;
5136 }
5137 /* It must be a TCP or UDP packet with a valid checksum */
5138 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5139 skb->ip_summed = CHECKSUM_UNNECESSARY;
5140
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005141 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005142}
5143
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005144static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5145 struct sk_buff *skb)
5146{
5147 struct igb_adapter *adapter = q_vector->adapter;
5148 struct e1000_hw *hw = &adapter->hw;
5149 u64 regval;
5150
5151 /*
5152 * If this bit is set, then the RX registers contain the time stamp. No
5153 * other packet will be time stamped until we read these registers, so
5154 * read the registers to make them available again. Because only one
5155 * packet can be time stamped at a time, we know that the register
5156 * values must belong to this one here and therefore we don't need to
5157 * compare any of the additional attributes stored for it.
5158 *
5159 * If nothing went wrong, then it should have a skb_shared_tx that we
5160 * can turn into a skb_shared_hwtstamps.
5161 */
5162 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
5163 return;
5164 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5165 return;
5166
5167 regval = rd32(E1000_RXSTMPL);
5168 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5169
5170 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5171}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005173 union e1000_adv_rx_desc *rx_desc)
5174{
5175 /* HW will not DMA in data larger than the given buffer, even if it
5176 * parses the (NFS, of course) header to be larger. In that case, it
5177 * fills the header buffer and spills the rest into the page.
5178 */
5179 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5180 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck4c844852009-10-27 15:52:07 +00005181 if (hlen > rx_ring->rx_buffer_len)
5182 hlen = rx_ring->rx_buffer_len;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005183 return hlen;
5184}
5185
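/**
 * igb_clean_rx_irq_adv - process completed rx descriptors
 * @q_vector: interrupt vector whose rx ring is serviced
 * @work_done: incremented once for every packet passed up the stack
 * @budget: NAPI budget; processing stops once it is reached
 *
 * Returns true if any descriptors were cleaned.
 **/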
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
        struct igb_ring *rx_ring = q_vector->rx_ring;
        struct net_device *netdev = rx_ring->netdev;
        struct pci_dev *pdev = rx_ring->pdev;
        union e1000_adv_rx_desc *rx_desc, *next_rxd;
        struct igb_buffer *buffer_info, *next_buffer;
        struct sk_buff *skb;
        bool cleaned = false;
        int cleaned_count = 0;
        int current_node = numa_node_id();
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int i;
        u32 staterr;
        u16 length;
        u16 vlan_tag;

        i = rx_ring->next_to_clean;
        buffer_info = &rx_ring->buffer_info[i];
        rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

        while (staterr & E1000_RXD_STAT_DD) {
                if (*work_done >= budget)
                        break;
                (*work_done)++;

                skb = buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                buffer_info->skb = NULL;

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                next_buffer = &rx_ring->buffer_info[i];

                length = le16_to_cpu(rx_desc->wb.upper.length);
                cleaned = true;
                cleaned_count++;

                if (buffer_info->dma) {
                        pci_unmap_single(pdev, buffer_info->dma,
                                         rx_ring->rx_buffer_len,
                                         PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                        if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
                                skb_put(skb, length);
                                goto send_up;
                        }
                        skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
                }

                if (length) {
                        pci_unmap_page(pdev, buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        buffer_info->page_dma = 0;

                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
                                           buffer_info->page,
                                           buffer_info->page_offset,
                                           length);

                        if ((page_count(buffer_info->page) != 1) ||
                            (page_to_nid(buffer_info->page) != current_node))
                                buffer_info->page = NULL;
                        else
                                get_page(buffer_info->page);

                        skb->len += length;
                        skb->data_len += length;
                        skb->truesize += length;
                }

                if (!(staterr & E1000_RXD_STAT_EOP)) {
                        buffer_info->skb = next_buffer->skb;
                        buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        next_buffer->dma = 0;
                        goto next_desc;
                }
send_up:
                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                igb_rx_hwtstamp(q_vector, staterr, skb);
                total_bytes += skb->len;
                total_packets++;

                igb_rx_checksum_adv(rx_ring, staterr, skb);

                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, rx_ring->queue_index);

                vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
                            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

                igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
                        igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = igb_desc_unused(rx_ring);

        if (cleaned_count)
                igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

        rx_ring->total_packets += total_packets;
        rx_ring->total_bytes += total_bytes;
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
        return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: address of the ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
        struct net_device *netdev = rx_ring->netdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        int bufsz;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        bufsz = rx_ring->rx_buffer_len;

        while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

                if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
                                buffer_info->page = netdev_alloc_page(netdev);
                                if (!buffer_info->page) {
                                        rx_ring->rx_stats.alloc_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
                        } else {
                                buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
                                pci_map_page(rx_ring->pdev, buffer_info->page,
                                             buffer_info->page_offset,
                                             PAGE_SIZE / 2,
                                             PCI_DMA_FROMDEVICE);
                        if (pci_dma_mapping_error(rx_ring->pdev,
                                                  buffer_info->page_dma)) {
                                buffer_info->page_dma = 0;
                                rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
                }

                skb = buffer_info->skb;
                if (!skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
                                rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }

                        buffer_info->skb = skb;
                }
                if (!buffer_info->dma) {
                        buffer_info->dma = pci_map_single(rx_ring->pdev,
                                                          skb->data,
                                                          bufsz,
                                                          PCI_DMA_FROMDEVICE);
                        if (pci_dma_mapping_error(rx_ring->pdev,
                                                  buffer_info->dma)) {
                                buffer_info->dma = 0;
                                rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (bufsz < IGB_RXBUFFER_1024) {
                        rx_desc->read.pkt_addr =
                                cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
                        rx_desc->read.hdr_addr = 0;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i == 0)
                        i = (rx_ring->count - 1);
                else
                        i--;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch. (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
                writel(i, rx_ring->tail);
        }
}

/**
 * igb_mii_ioctl - handle MII register access ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure containing the MII data
 * @cmd: ioctl command, one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct mii_ioctl_data *data = if_mii(ifr);

        if (adapter->hw.phy.media_type != e1000_media_type_copper)
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = adapter->hw.phy.addr;
                break;
        case SIOCGMIIREG:
                if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
                                     &data->val_out))
                        return -EIO;
                break;
        case SIOCSMIIREG:
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the hwtstamp_config
 * @cmd: ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct hwtstamp_config config;
        u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
        u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
        u32 tsync_rx_cfg = 0;
        bool is_l4 = false;
        bool is_l2 = false;
        u32 regval;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                tsync_tx_ctl = 0;
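                /* fall through */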
        case HWTSTAMP_TX_ON:
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tsync_rx_ctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_ALL:
                /*
                 * register TSYNCRXCFG must be set, therefore it is not
                 * possible to time stamp both Sync and Delay_Req messages
                 * => fall back to time stamping all packets
                 */
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                is_l2 = true;
                break;
        default:
                return -ERANGE;
        }

        if (hw->mac.type == e1000_82575) {
                if (tsync_rx_ctl | tsync_tx_ctl)
                        return -EINVAL;
                return 0;
        }

        /* enable/disable TX */
        regval = rd32(E1000_TSYNCTXCTL);
        regval &= ~E1000_TSYNCTXCTL_ENABLED;
        regval |= tsync_tx_ctl;
        wr32(E1000_TSYNCTXCTL, regval);

        /* enable/disable RX */
        regval = rd32(E1000_TSYNCRXCTL);
        regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
        regval |= tsync_rx_ctl;
        wr32(E1000_TSYNCRXCTL, regval);

        /* define which PTP packets are time stamped */
        wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

        /* define ethertype filter for timestamped packets */
        if (is_l2)
                wr32(E1000_ETQF(3),
                     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
                      E1000_ETQF_1588 | /* enable timestamping */
                      ETH_P_1588));     /* 1588 eth protocol type */
        else
                wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
        /* L4 Queue Filter[3]: filter by destination port and protocol */
        if (is_l4) {
                u32 ftqf = (IPPROTO_UDP /* UDP */
                        | E1000_FTQF_VF_BP /* VF not compared */
                        | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
                        | E1000_FTQF_MASK); /* mask all inputs */
                ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

                wr32(E1000_IMIR(3), htons(PTP_PORT));
                wr32(E1000_IMIREXT(3),
                     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
                if (hw->mac.type == e1000_82576) {
                        /* enable source port check */
                        wr32(E1000_SPQF(3), htons(PTP_PORT));
                        ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
                }
                wr32(E1000_FTQF(3), ftqf);
        } else {
                wr32(E1000_FTQF(3), E1000_FTQF_MASK);
        }
        wrfl();

        adapter->hwtstamp_config = config;

        /* clear TX/RX time stamp registers, just to be sure */
        regval = rd32(E1000_TXSTMPH);
        regval = rd32(E1000_RXSTMPH);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

/**
 * igb_ioctl - dispatch device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
        case SIOCSHWTSTAMP:
                return igb_hwtstamp_ioctl(netdev, ifr, cmd);
        default:
                return -EOPNOTSUPP;
        }
}

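/**
 * igb_read_pcie_cap_reg - read a word from the PCIe capability block
 * @hw: pointer to the hardware structure
 * @reg: offset within the PCI Express capability
 * @value: where to store the word that was read
 **/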
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_read_config_word(adapter->pdev, cap_offset + reg, value);

        return 0;
}

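/**
 * igb_write_pcie_cap_reg - write a word to the PCIe capability block
 * @hw: pointer to the hardware structure
 * @reg: offset within the PCI Express capability
 * @value: word to write
 **/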
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

        return 0;
}

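/**
 * igb_vlan_rx_register - register a vlan group with the adapter
 * @netdev: network interface device structure
 * @grp: vlan group to register, or NULL to tear vlan handling down
 *
 * Enables or disables VLAN tag insert/strip in hardware depending on
 * whether a vlan group is being registered.
 **/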
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl;

        igb_irq_disable(adapter);
        adapter->vlgrp = grp;

        if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl |= E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);

                /* Disable CFI check */
                rctl = rd32(E1000_RCTL);
                rctl &= ~E1000_RCTL_CFIEN;
                wr32(E1000_RCTL, rctl);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl &= ~E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
        }

        igb_rlpml_set(adapter);

        if (!test_bit(__IGB_DOWN, &adapter->state))
                igb_irq_enable(adapter);
}

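/**
 * igb_vlan_rx_add_vid - add a vlan id filter
 * @netdev: network interface device structure
 * @vid: vlan id to be added to the VLVF and VFTA filter tables
 **/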
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;

        /* attempt to add filter to vlvf array */
        igb_vlvf_set(adapter, vid, true, pf_id);

        /* add the filter since PF can receive vlans w/o entry in vlvf */
        igb_vfta_set(hw, vid, true);
}

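/**
 * igb_vlan_rx_kill_vid - remove a vlan id filter
 * @netdev: network interface device structure
 * @vid: vlan id to be removed from the filter tables
 **/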
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;
        s32 err;

        igb_irq_disable(adapter);
        vlan_group_set_device(adapter->vlgrp, vid, NULL);

        if (!test_bit(__IGB_DOWN, &adapter->state))
                igb_irq_enable(adapter);

        /* remove vlan from VLVF table array */
        err = igb_vlvf_set(adapter, vid, false, pf_id);

        /* if vid was not present in VLVF just remove it from table */
        if (err)
                igb_vfta_set(hw, vid, false);
}

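/**
 * igb_restore_vlan - re-add the vlan filters after a reset
 * @adapter: board private structure
 **/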
static void igb_restore_vlan(struct igb_adapter *adapter)
{
        igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
                        if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        igb_vlan_rx_add_vid(adapter->netdev, vid);
                }
        }
}

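/**
 * igb_set_spd_dplx - force a specific speed/duplex setting
 * @adapter: board private structure
 * @spddplx: SPEED_* + DUPLEX_* combination to force
 *
 * Returns 0 on success, -EINVAL on an unsupported combination.
 **/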
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;

        mac->autoneg = 0;

        switch (spddplx) {
        case SPEED_10 + DUPLEX_HALF:
                mac->forced_speed_duplex = ADVERTISE_10_HALF;
                break;
        case SPEED_10 + DUPLEX_FULL:
                mac->forced_speed_duplex = ADVERTISE_10_FULL;
                break;
        case SPEED_100 + DUPLEX_HALF:
                mac->forced_speed_duplex = ADVERTISE_100_HALF;
                break;
        case SPEED_100 + DUPLEX_FULL:
                mac->forced_speed_duplex = ADVERTISE_100_FULL;
                break;
        case SPEED_1000 + DUPLEX_FULL:
                mac->autoneg = 1;
                adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
                break;
        case SPEED_1000 + DUPLEX_HALF: /* not supported */
        default:
                dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
                return -EINVAL;
        }
        return 0;
}

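/**
 * __igb_shutdown - common suspend/shutdown path
 * @pdev: PCI device information struct
 * @enable_wake: output, set true if Wake on LAN should remain armed
 *
 * Quiesces the interface and programs the wake-up filters so that
 * igb_suspend() and igb_shutdown() can share one code path.
 **/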
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = adapter->wol;
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev))
                igb_close(netdev);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
#endif

        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;

        if (wufc) {
                igb_setup_rctl(adapter);
                igb_set_rx_mode(netdev);

                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
                        rctl = rd32(E1000_RCTL);
                        rctl |= E1000_RCTL_MPE;
                        wr32(E1000_RCTL, rctl);
                }

                ctrl = rd32(E1000_CTRL);
                /* advertise wake from D3Cold */
                #define E1000_CTRL_ADVD3WUC 0x00100000
                /* phy power management enable */
                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);

                /* Allow time for pending master requests to run */
                igb_disable_pcie_master(hw);

                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
        } else {
                wr32(E1000_WUC, 0);
                wr32(E1000_WUFC, 0);
        }

        *enable_wake = wufc || adapter->en_mng_pt;
        if (!*enable_wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);

        /* Release control of h/w to f/w. If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        pci_disable_device(pdev);

        return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int retval;
        bool wake;

        retval = __igb_shutdown(pdev, &wake);
        if (retval)
                return retval;

        if (wake) {
                pci_prepare_to_sleep(pdev);
        } else {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }

        return 0;
}

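/**
 * igb_resume - restore the device after a suspend
 * @pdev: PCI device information struct
 **/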
static int igb_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "igb: Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        wr32(E1000_WUS, ~0);

        if (netif_running(netdev)) {
                err = igb_open(netdev);
                if (err)
                        return err;
        }

        netif_device_attach(netdev);

        return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
        bool wake;

        __igb_shutdown(pdev, &wake);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, wake);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int i;

        if (!adapter->msix_entries) {
                struct igb_q_vector *q_vector = adapter->q_vector[0];
                igb_irq_disable(adapter);
                napi_schedule(&q_vector->napi);
                return;
        }

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                wr32(E1000_EIMC, q_vector->eims_value);
                napi_schedule(&q_vector->napi);
        }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev))
                igb_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        pci_ers_result_t result;
        int err;

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);

                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);

                igb_reset(adapter);
                wr32(E1000_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
                        "failed 0x%0x\n", err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (igb_up(adapter)) {
                        dev_err(&pdev->dev, "igb_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);
}

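/**
 * igb_rar_set_qsel - write a MAC address and pool select to a RAR entry
 * @adapter: board private structure
 * @addr: MAC address, in network byte order
 * @index: receive address register (RAR) entry to program
 * @qsel: pool/queue the address should be associated with
 **/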
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
        u32 rar_low, rar_high;
        struct e1000_hw *hw = &adapter->hw;

        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

        /* Indicate to hardware the Address is Valid. */
        rar_high |= E1000_RAH_AV;

        if (hw->mac.type == e1000_82575)
                rar_high |= E1000_RAH_POOL_1 * qsel;
        else
                rar_high |= E1000_RAH_POOL_1 << qsel;

        wr32(E1000_RAL(index), rar_low);
        wrfl();
        wr32(E1000_RAH(index), rar_high);
        wrfl();
}

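/**
 * igb_set_vf_mac - program the MAC address for a given VF
 * @adapter: board private structure
 * @vf: VF number the address belongs to
 * @mac_addr: MAC address to assign
 **/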
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
        struct e1000_hw *hw = &adapter->hw;
        /* VF MAC addresses start at the end of the receive addresses and
         * move towards the first, so a collision should not be possible */
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);

        memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

        igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

        return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
                return -EINVAL;
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
                 " change effective.\n");
        if (test_bit(__IGB_DOWN, &adapter->state)) {
                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
                         " but the PF device is not up.\n");
                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
                         " attempting to use the VF device.\n");
        }
        return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
        return -EOPNOTSUPP;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (vf >= adapter->vfs_allocated_count)
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
        ivi->tx_rate = 0;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        return 0;
}

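/**
 * igb_vmm_control - configure VM-to-VM traffic handling
 * @adapter: board private structure
 *
 * Enables vlan tag stripping for replicated traffic and switches
 * loopback and replication on or off depending on whether any VFs
 * are allocated. Not supported on 82575.
 **/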
static void igb_vmm_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 reg;

        /* replication is not supported for 82575 */
        if (hw->mac.type == e1000_82575)
                return;

        /* enable replication vlan tag stripping */
        reg = rd32(E1000_RPLOLR);
        reg |= E1000_RPLOLR_STRVLAN;
        wr32(E1000_RPLOLR, reg);

        /* notify HW that the MAC is adding vlan tags */
        reg = rd32(E1000_DTXCTL);
        reg |= E1000_DTXCTL_VLAN_ADDED;
        wr32(E1000_DTXCTL, reg);

        if (adapter->vfs_allocated_count) {
                igb_vmdq_set_loopback_pf(hw, true);
                igb_vmdq_set_replication_pf(hw, true);
        } else {
                igb_vmdq_set_loopback_pf(hw, false);
                igb_vmdq_set_replication_pf(hw, false);
        }
}

/* igb_main.c */