/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                                              pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
        u64 stamp = 0;
        int shift = 0;

        /*
         * The timestamp latches on the lowest register read.  For the 82580
         * the lowest register is SYSTIMR instead of SYSTIML.  However, we
         * never adjusted TIMINCA, so SYSTIMR will just read as all 0s;
         * ignore it.
         */
        if (hw->mac.type == e1000_82580) {
                stamp = rd32(E1000_SYSTIMR) >> 8;
                shift = IGB_82580_TSYNC_SHIFT;
        }

        stamp |= (u64)rd32(E1000_SYSTIML) << shift;
        stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
        return stamp;
}

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
        cycle_t hw = adapter->cycles.read(&adapter->cycles);
        struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
        struct timespec sys;
        struct timespec delta;
        getnstimeofday(&sys);

        delta = timespec_sub(nic, sys);

        sprintf(buffer,
                "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
                hw,
                (long)nic.tv_sec, nic.tv_nsec,
                (long)sys.tv_sec, sys.tv_nsec,
                (long)delta.tv_sec, delta.tv_nsec);

        return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

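/*
 * Q_IDX_82576 maps queue index i to its 82576 register offset by
 * interleaving even and odd indices across the two halves of the queue
 * space (0 -> 0, 1 -> 8, 2 -> 1, 3 -> 9, ...), so each VF's queue pair
 * lands on matching rx/tx offsets; see igb_cache_ring_register() below.
 */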
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i = 0, j = 0;
        u32 rbase_offset = adapter->vfs_allocated_count;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence.
                 */
                if (adapter->vfs_allocated_count) {
                        for (; i < adapter->rss_queues; i++)
                                adapter->rx_ring[i]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(i);
                        for (; j < adapter->rss_queues; j++)
                                adapter->tx_ring[j]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(j);
                }
                /* fall through - remaining queues map linearly below */
        case e1000_82575:
        case e1000_82580:
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
                for (; j < adapter->num_tx_queues; j++)
                        adapter->tx_ring[j]->reg_idx = rbase_offset + j;
                break;
        }
}

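/**
 * igb_free_queues - free memory allocated for the rx and tx rings
 * @adapter: board private structure
 *
 * Frees each ring allocated by igb_alloc_queues() and resets the
 * queue counts to zero.
 **/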
static void igb_free_queues(struct igb_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                kfree(adapter->tx_ring[i]);
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                kfree(adapter->rx_ring[i]);
                adapter->rx_ring[i] = NULL;
        }
        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        struct igb_ring *ring;
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
                adapter->tx_ring[i] = ring;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
                adapter->rx_ring[i] = ring;
        }

        igb_cache_ring_register(adapter);

        return 0;

err:
        igb_free_queues(adapter);

        return -ENOMEM;
}

#define IGB_N0_QUEUE -1
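/**
 * igb_assign_vector - map a q_vector's rings to an MSI-X vector
 * @q_vector: interrupt vector whose rings are being mapped
 * @msix_vector: hardware vector number to assign
 *
 * Writes the MSIXBM bitmask (82575) or the IVAR table entries
 * (82576/82580) so that the rings owned by this q_vector raise the given
 * vector, and accumulates the vector's EIMS bit into eims_enable_mask.
 **/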
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
        u32 msixbm = 0;
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
        int rx_queue = IGB_N0_QUEUE;
        int tx_queue = IGB_N0_QUEUE;

        if (q_vector->rx_ring)
                rx_queue = q_vector->rx_ring->reg_idx;
        if (q_vector->tx_ring)
                tx_queue = q_vector->tx_ring->reg_idx;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
                 * or more queues to a vector, we write the appropriate bits
                 * into the MSIXBM register for that vector.
                 */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                if (!adapter->msix_entries && msix_vector == 0)
                        msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                 * Each queue has a single entry in the table to which we write
                 * a vector number along with a "valid" bit.  Sadly, the layout
                 * of the table is somewhat counterintuitive.
                 */
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        } else {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        } else {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        case e1000_82580:
                /* 82580 uses the same table-based approach as 82576, but has
                 * fewer entries; as a result we carry over for queues greater
                 * than 4.
                 */
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue & 0x1) {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        } else {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue & 0x1) {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        } else {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }

        /* add q_vector eims value to global eims_enable_mask */
        adapter->eims_enable_mask |= q_vector->eims_value;

        /* configure q_vector to set itr on first interrupt */
        q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support */
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);

                /* enable msix_other interrupt */
                array_wr32(E1000_MSIXBM(0), vector++,
                           E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
        case e1000_82580:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug.
                 */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
                                 E1000_GPIE_NSICR);

                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;

                wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */

        adapter->eims_enable_mask |= adapter->eims_other;

        for (i = 0; i < adapter->num_q_vectors; i++)
                igb_assign_vector(adapter->q_vector[i], vector++);

        wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;

        err = request_irq(adapter->msix_entries[vector].vector,
                          igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto out;
        vector++;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];

                q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

                if (q_vector->rx_ring && q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else if (q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                                q_vector->tx_ring->queue_index);
                else if (q_vector->rx_ring)
                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else
                        sprintf(q_vector->name, "%s-unused", netdev->name);

                err = request_irq(adapter->msix_entries[vector].vector,
                                  igb_msix_ring, 0, q_vector->name,
                                  q_vector);
                if (err)
                        goto out;
                vector++;
        }

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}

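/**
 * igb_reset_interrupt_capability - disable MSI-X or MSI
 * @adapter: board private structure
 *
 * Disables MSI-X and frees the vector table if MSI-X was in use,
 * otherwise disables MSI, returning the device to legacy interrupts.
 **/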
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
        }
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
                if (!q_vector)
                        continue;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
        adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
        igb_free_queues(adapter);
        igb_free_q_vectors(adapter);
        igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        /* Number of supported queues. */
        adapter->num_rx_queues = adapter->rss_queues;
        adapter->num_tx_queues = adapter->rss_queues;

        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;

        /* if tx handler is separate add 1 for every tx queue */
        if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
                numvecs += adapter->num_tx_queues;

        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;

        /* add 1 vector for link status interrupts */
        numvecs++;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
                struct e1000_hw *hw = &adapter->hw;
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(adapter->pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
#endif
        adapter->vfs_allocated_count = 0;
        adapter->rss_queues = 1;
        adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;

err_out:
        igb_free_q_vectors(adapter);
        return -ENOMEM;
}

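/**
 * igb_map_rx_ring_to_vector - attach an rx ring to a q_vector
 * @adapter: board private structure
 * @ring_idx: index of the rx ring to map
 * @v_idx: index of the q_vector that will own the ring
 *
 * Links the ring and vector to each other and seeds the vector's
 * itr_val from the adapter's rx interrupt throttle setting.
 **/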
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

        q_vector->rx_ring = adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->rx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

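/**
 * igb_map_tx_ring_to_vector - attach a tx ring to a q_vector
 * @adapter: board private structure
 * @ring_idx: index of the tx ring to map
 * @v_idx: index of the q_vector that will own the ring
 *
 * Tx counterpart of igb_map_rx_ring_to_vector(); itr_val is seeded
 * from the tx interrupt throttle setting instead.
 **/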
static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

        q_vector->tx_ring = adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->tx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
        int i;
        int v_idx = 0;

        if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
            (adapter->num_q_vectors < adapter->num_tx_queues))
                return -ENOMEM;

        if (adapter->num_q_vectors >=
            (adapter->num_rx_queues + adapter->num_tx_queues)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        } else {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        if (i < adapter->num_tx_queues)
                                igb_map_tx_ring_to_vector(adapter, i, v_idx);
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                }
                for (; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        }
        return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;

        igb_set_interrupt_capability(adapter);

        err = igb_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
                goto err_alloc_q_vectors;
        }

        err = igb_alloc_queues(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        err = igb_map_ring_to_vector(adapter);
        if (err) {
                dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
                goto err_map_queues;
        }

        return 0;
err_map_queues:
        igb_free_queues(adapter);
err_alloc_queues:
        igb_free_q_vectors(adapter);
err_alloc_q_vectors:
        igb_reset_interrupt_capability(adapter);
        return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                adapter->num_q_vectors = 1;
                err = igb_alloc_q_vectors(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for vectors\n");
                        goto request_done;
                }
                err = igb_alloc_queues(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for queues\n");
                        igb_free_q_vectors(adapter);
                        goto request_done;
                }
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
                igb_assign_vector(adapter->q_vector[0], 0);
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
                                  netdev->name, adapter);
                if (!err)
                        goto request_done;

                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
                          netdev->name, adapter);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}

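/**
 * igb_free_irq - free IRQs requested by igb_request_irq
 * @adapter: board private structure
 *
 * Releases the "other" vector plus one IRQ per q_vector when MSI-X is
 * in use, otherwise the single MSI or legacy IRQ.
 **/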
static void igb_free_irq(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                int vector = 0, i;

                free_irq(adapter->msix_entries[vector++].vector, adapter);

                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct igb_q_vector *q_vector = adapter->q_vector[i];
                        free_irq(adapter->msix_entries[vector++].vector,
                                 q_vector);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /*
         * we need to be careful when disabling interrupts.  The VFs are also
         * mapped into these registers and so clearing the bits can cause
         * issues on the VF drivers, so we only need to clear what we set.
         */
        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
                u32 regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                if (adapter->vfs_allocated_count) {
                        wr32(E1000_MBVFIMR, 0xFF);
                        ims |= E1000_IMS_VMMB;
                }
                if (adapter->hw.mac.type == e1000_82580)
                        ims |= E1000_IMS_DRSTA;

                wr32(E1000_IMS, ims);
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
                wr32(E1000_IAM, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
        }
}

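/**
 * igb_update_mng_vlan - keep the management VLAN in the VLAN filter table
 * @adapter: board private structure
 *
 * Adds the manageability firmware's VLAN id to the filter table and
 * removes the previously cached id once it is no longer in use.
 **/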
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                /* add VID to filter table */
                igb_vfta_set(hw, vid, true);
                adapter->mng_vlan_id = vid;
        } else {
                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        }

        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
            (vid != old_vid) &&
            !vlan_group_get_device(adapter->vlgrp, old_vid)) {
                /* remove VID from filter table */
                igb_vfta_set(hw, old_vid, false);
        }
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_rx_mode(netdev);

        igb_restore_vlan(adapter);

        igb_setup_tctl(adapter);
        igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);

        igb_configure_tx(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
        if (adapter->hw.phy.media_type == e1000_media_type_copper)
                igb_power_up_phy_copper(&adapter->hw);
        else
                igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
        if (adapter->hw.phy.media_type == e1000_media_type_copper)
                igb_power_down_phy_copper_82575(&adapter->hw);
        else
                igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
        else
                igb_assign_vector(adapter->q_vector[0], 0);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(adapter->netdev);

        /* start the watchdog. */
        hw->mac.get_link_status = 1;
        schedule_work(&adapter->watchdog_task);

        return 0;
}

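/**
 * igb_down - Close the interface and stop all traffic
 * @adapter: board private structure
 *
 * Disables receives and transmits in hardware, stops the queues, NAPI
 * and timers, masks interrupts, records final stats, and resets the
 * hardware if the PCI channel is still online.
 **/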
void igb_down(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_disable(&q_vector->napi);
        }

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netif_carrier_off(netdev);

        /* record the stats before reset */
        igb_update_stats(adapter);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
        /* since we reset the hardware DCA settings were cleared */
        igb_setup_dca(adapter);
#endif
}

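/**
 * igb_reinit_locked - restart the interface under the resetting bit
 * @adapter: board private structure
 *
 * Waits for any reset already in progress, then brings the interface
 * down and back up.  Sleeps, so it must not be called from interrupt
 * context (the WARN_ON enforces this).
 **/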
void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}

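/**
 * igb_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer between rx and tx for jumbo frames,
 * recalculates the flow control watermarks, quiesces any active VFs,
 * and then resets and re-initializes the MAC.
 **/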
1243void igb_reset(struct igb_adapter *adapter)
1244{
Alexander Duyck090b1792009-10-27 23:51:55 +00001245 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001246 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001247 struct e1000_mac_info *mac = &hw->mac;
1248 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001249 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1250 u16 hwm;
1251
1252 /* Repartition Pba for greater than 9k mtu
1253 * To take effect CTRL.RST is required.
1254 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001255 switch (mac->type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00001256 case e1000_82580:
1257 pba = rd32(E1000_RXPBS);
1258 pba = igb_rxpbs_adjust_82580(pba);
1259 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001260 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001261 pba = rd32(E1000_RXPBS);
1262 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001263 break;
1264 case e1000_82575:
1265 default:
1266 pba = E1000_PBA_34K;
1267 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001268 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001269
Alexander Duyck2d064c02008-07-08 15:10:12 -07001270 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1271 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001272 /* adjust PBA for jumbo frames */
1273 wr32(E1000_PBA, pba);
1274
1275 /* To maintain wire speed transmits, the Tx FIFO should be
1276 * large enough to accommodate two full transmit packets,
1277 * rounded up to the next 1KB and expressed in KB. Likewise,
1278 * the Rx FIFO should be large enough to accommodate at least
1279 * one full receive packet and is similarly rounded up and
1280 * expressed in KB. */
1281 pba = rd32(E1000_PBA);
1282		/* upper 16 bits hold the Tx packet buffer allocation size in KB */
1283 tx_space = pba >> 16;
1284		/* lower 16 bits hold the Rx packet buffer allocation size in KB */
1285 pba &= 0xffff;
1286		/* the Tx FIFO also stores 16 bytes of information about the Tx packet,
1287		 * but the size doesn't include the Ethernet FCS because hardware appends it */
1288 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001289 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001290 ETH_FCS_LEN) * 2;
1291 min_tx_space = ALIGN(min_tx_space, 1024);
1292 min_tx_space >>= 10;
1293 /* software strips receive CRC, so leave room for it */
1294 min_rx_space = adapter->max_frame_size;
1295 min_rx_space = ALIGN(min_rx_space, 1024);
1296 min_rx_space >>= 10;
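		/* Illustrative numbers (not in the original source): with a
		 * standard 1500-byte MTU, max_frame_size = 1518 and the
		 * advanced Tx descriptor is 16 bytes, so
		 * min_tx_space = (1518 + 16 - 4) * 2 = 3060 ->
		 * ALIGN(3060, 1024) = 3072 -> 3KB, and
		 * min_rx_space = ALIGN(1518, 1024) = 2048 -> 2KB. */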
1297
1298 /* If current Tx allocation is less than the min Tx FIFO size,
1299 * and the min Tx FIFO size is less than the current Rx FIFO
1300 * allocation, take space away from current Rx allocation */
1301 if (tx_space < min_tx_space &&
1302 ((min_tx_space - tx_space) < pba)) {
1303 pba = pba - (min_tx_space - tx_space);
1304
1305 /* if short on rx space, rx wins and must trump tx
1306 * adjustment */
1307 if (pba < min_rx_space)
1308 pba = min_rx_space;
1309 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001310 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001311 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001312
1313 /* flow control settings */
1314 /* The high water mark must be low enough to fit one full frame
1315 * (or the size used for early receive) above it in the Rx FIFO.
1316 * Set it to the lower of:
1317 * - 90% of the Rx FIFO size, or
1318 * - the full Rx FIFO size minus one full frame */
1319 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001320 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001321
Alexander Duyckd405ea32009-12-23 13:21:27 +00001322 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1323 fc->low_water = fc->high_water - 16;
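	/* Worked example (not in the original source), assuming the 82575
	 * default pba of 34KB and a 1518-byte max frame:
	 * hwm = min(34816 * 9 / 10, 34816 - 2 * 1518) = min(31334, 31780)
	 * = 31334, so high_water = 31334 & 0xFFF0 = 31328 and
	 * low_water = 31312. */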
Auke Kok9d5c8242008-01-24 02:22:38 -08001324 fc->pause_time = 0xFFFF;
1325 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001326 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001327
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001328 /* disable receive for all VFs and wait one second */
1329 if (adapter->vfs_allocated_count) {
1330 int i;
1331 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001332 adapter->vf_data[i].flags = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001333
1334 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001335 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001336
1337 /* disable transmits and receives */
1338 wr32(E1000_VFRE, 0);
1339 wr32(E1000_VFTE, 0);
1340 }
1341
Auke Kok9d5c8242008-01-24 02:22:38 -08001342 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001343 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001344 wr32(E1000_WUC, 0);
1345
Alexander Duyck330a6d62009-10-27 23:51:35 +00001346 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001347 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001348
Alexander Duyck55cac242009-11-19 12:42:21 +00001349 if (hw->mac.type == e1000_82580) {
1350 u32 reg = rd32(E1000_PCIEMISC);
1351 wr32(E1000_PCIEMISC,
1352 reg & ~E1000_PCIEMISC_LX_DECISION);
1353 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001354 if (!netif_running(adapter->netdev))
1355 igb_power_down_link(adapter);
1356
Auke Kok9d5c8242008-01-24 02:22:38 -08001357 igb_update_mng_vlan(adapter);
1358
1359 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1360 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1361
Alexander Duyck330a6d62009-10-27 23:51:35 +00001362 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001363}
1364
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001365static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001366 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001367 .ndo_stop = igb_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08001368 .ndo_start_xmit = igb_xmit_frame_adv,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001369 .ndo_get_stats = igb_get_stats,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001370 .ndo_set_rx_mode = igb_set_rx_mode,
1371 .ndo_set_multicast_list = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001372 .ndo_set_mac_address = igb_set_mac,
1373 .ndo_change_mtu = igb_change_mtu,
1374 .ndo_do_ioctl = igb_ioctl,
1375 .ndo_tx_timeout = igb_tx_timeout,
1376 .ndo_validate_addr = eth_validate_addr,
1377 .ndo_vlan_rx_register = igb_vlan_rx_register,
1378 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1379 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001380 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1381 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1382 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1383 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001384#ifdef CONFIG_NET_POLL_CONTROLLER
1385 .ndo_poll_controller = igb_netpoll,
1386#endif
1387};
1388
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001389/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001390 * igb_probe - Device Initialization Routine
1391 * @pdev: PCI device information struct
1392 * @ent: entry in igb_pci_tbl
1393 *
1394 * Returns 0 on success, negative on failure
1395 *
1396 * igb_probe initializes an adapter identified by a pci_dev structure.
1397 * The OS initialization, configuring of the adapter private structure,
1398 * and a hardware reset occur.
1399 **/
1400static int __devinit igb_probe(struct pci_dev *pdev,
1401 const struct pci_device_id *ent)
1402{
1403 struct net_device *netdev;
1404 struct igb_adapter *adapter;
1405 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001406 u16 eeprom_data = 0;
1407 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001408 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1409 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001410 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001411 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1412 u32 part_num;
1413
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001414 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001415 if (err)
1416 return err;
1417
1418 pci_using_dac = 0;
Yang Hongyang6a355282009-04-06 19:01:13 -07001419 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001420 if (!err) {
Yang Hongyang6a355282009-04-06 19:01:13 -07001421 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001422 if (!err)
1423 pci_using_dac = 1;
1424 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07001425 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001426 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07001427 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001428 if (err) {
1429 dev_err(&pdev->dev, "No usable DMA "
1430 "configuration, aborting\n");
1431 goto err_dma;
1432 }
1433 }
1434 }
1435
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001436 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1437 IORESOURCE_MEM),
1438 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001439 if (err)
1440 goto err_pci_reg;
1441
Frans Pop19d5afd2009-10-02 10:04:12 -07001442 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001443
Auke Kok9d5c8242008-01-24 02:22:38 -08001444 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001445 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001446
1447 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001448 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1449 IGB_ABS_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001450 if (!netdev)
1451 goto err_alloc_etherdev;
1452
1453 SET_NETDEV_DEV(netdev, &pdev->dev);
1454
1455 pci_set_drvdata(pdev, netdev);
1456 adapter = netdev_priv(netdev);
1457 adapter->netdev = netdev;
1458 adapter->pdev = pdev;
1459 hw = &adapter->hw;
1460 hw->back = adapter;
1461 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1462
1463 mmio_start = pci_resource_start(pdev, 0);
1464 mmio_len = pci_resource_len(pdev, 0);
1465
1466 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001467 hw->hw_addr = ioremap(mmio_start, mmio_len);
1468 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001469 goto err_ioremap;
1470
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001471 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001472 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001473 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001474
1475 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1476
1477 netdev->mem_start = mmio_start;
1478 netdev->mem_end = mmio_start + mmio_len;
1479
Auke Kok9d5c8242008-01-24 02:22:38 -08001480 /* PCI config space info */
1481 hw->vendor_id = pdev->vendor;
1482 hw->device_id = pdev->device;
1483 hw->revision_id = pdev->revision;
1484 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1485 hw->subsystem_device_id = pdev->subsystem_device;
1486
Auke Kok9d5c8242008-01-24 02:22:38 -08001487 /* Copy the default MAC, PHY and NVM function pointers */
1488 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1489 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1490 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1491 /* Initialize skew-specific constants */
1492 err = ei->get_invariants(hw);
1493 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001494 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001495
Alexander Duyck450c87c2009-02-06 23:22:11 +00001496 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001497 err = igb_sw_init(adapter);
1498 if (err)
1499 goto err_sw_init;
1500
1501 igb_get_bus_info_pcie(hw);
1502
1503 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001504
1505 /* Copper options */
1506 if (hw->phy.media_type == e1000_media_type_copper) {
1507 hw->phy.mdix = AUTO_ALL_MODES;
1508 hw->phy.disable_polarity_correction = false;
1509 hw->phy.ms_type = e1000_ms_hw_default;
1510 }
1511
1512 if (igb_check_reset_block(hw))
1513 dev_info(&pdev->dev,
1514 "PHY reset is blocked due to SOL/IDER session.\n");
1515
1516 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001517 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001518 NETIF_F_HW_VLAN_TX |
1519 NETIF_F_HW_VLAN_RX |
1520 NETIF_F_HW_VLAN_FILTER;
1521
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001522 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001523 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001524 netdev->features |= NETIF_F_TSO6;
Herbert Xu5c0999b2009-01-19 15:20:57 -08001525 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001526
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001527 netdev->vlan_features |= NETIF_F_TSO;
1528 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001529 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001530 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001531 netdev->vlan_features |= NETIF_F_SG;
1532
Auke Kok9d5c8242008-01-24 02:22:38 -08001533 if (pci_using_dac)
1534 netdev->features |= NETIF_F_HIGHDMA;
1535
Alexander Duyck5b043fb2009-10-27 23:52:31 +00001536 if (hw->mac.type >= e1000_82576)
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001537 netdev->features |= NETIF_F_SCTP_CSUM;
1538
Alexander Duyck330a6d62009-10-27 23:51:35 +00001539 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001540
1541 /* before reading the NVM, reset the controller to put the device in a
1542 * known good starting state */
1543 hw->mac.ops.reset_hw(hw);
1544
1545 /* make sure the NVM is good */
1546 if (igb_validate_nvm_checksum(hw) < 0) {
1547 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1548 err = -EIO;
1549 goto err_eeprom;
1550 }
1551
1552 /* copy the MAC address out of the NVM */
1553 if (hw->mac.ops.read_mac_addr(hw))
1554 dev_err(&pdev->dev, "NVM Read Error\n");
1555
1556 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1557 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1558
1559 if (!is_valid_ether_addr(netdev->perm_addr)) {
1560 dev_err(&pdev->dev, "Invalid MAC Address\n");
1561 err = -EIO;
1562 goto err_eeprom;
1563 }
1564
Alexander Duyck0e340482009-03-20 00:17:08 +00001565 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1566 (unsigned long) adapter);
1567 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1568 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001569
1570 INIT_WORK(&adapter->reset_task, igb_reset_task);
1571 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1572
Alexander Duyck450c87c2009-02-06 23:22:11 +00001573 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001574 adapter->fc_autoneg = true;
1575 hw->mac.autoneg = true;
1576 hw->phy.autoneg_advertised = 0x2f;
1577
Alexander Duyck0cce1192009-07-23 18:10:24 +00001578 hw->fc.requested_mode = e1000_fc_default;
1579 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001580
Auke Kok9d5c8242008-01-24 02:22:38 -08001581 igb_validate_mdi_setting(hw);
1582
Auke Kok9d5c8242008-01-24 02:22:38 -08001583	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
1584 * enable the ACPI Magic Packet filter
1585 */
1586
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001587 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001588 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duyck55cac242009-11-19 12:42:21 +00001589 else if (hw->mac.type == e1000_82580)
1590 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1591 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1592 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001593 else if (hw->bus.func == 1)
1594 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001595
1596 if (eeprom_data & eeprom_apme_mask)
1597 adapter->eeprom_wol |= E1000_WUFC_MAG;
1598
1599 /* now that we have the eeprom settings, apply the special cases where
1600 * the eeprom may be wrong or the board simply won't support wake on
1601 * lan on a particular port */
1602 switch (pdev->device) {
1603 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1604 adapter->eeprom_wol = 0;
1605 break;
1606 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001607 case E1000_DEV_ID_82576_FIBER:
1608 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001609 /* Wake events only supported on port A for dual fiber
1610 * regardless of eeprom setting */
1611 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1612 adapter->eeprom_wol = 0;
1613 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001614 case E1000_DEV_ID_82576_QUAD_COPPER:
1615 /* if quad port adapter, disable WoL on all but port A */
1616 if (global_quad_port_a != 0)
1617 adapter->eeprom_wol = 0;
1618 else
1619 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1620 /* Reset for multiple quad port adapters */
1621 if (++global_quad_port_a == 4)
1622 global_quad_port_a = 0;
1623 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001624 }
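	/* Example of the quad-port logic above (not in the original source):
	 * on an 82576 quad copper adapter probed in port order, only the
	 * first port keeps its WoL setting and gets IGB_FLAG_QUAD_PORT_A;
	 * the other three get eeprom_wol = 0, and global_quad_port_a wraps
	 * to 0 after the fourth port so a second quad adapter behaves the
	 * same way. */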
1625
1626 /* initialize the wol settings based on the eeprom settings */
1627 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001628 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001629
1630 /* reset the hardware with the new settings */
1631 igb_reset(adapter);
1632
1633 /* let the f/w know that the h/w is now under the control of the
1634 * driver. */
1635 igb_get_hw_control(adapter);
1636
Auke Kok9d5c8242008-01-24 02:22:38 -08001637 strcpy(netdev->name, "eth%d");
1638 err = register_netdev(netdev);
1639 if (err)
1640 goto err_register;
1641
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001642 /* carrier off reporting is important to ethtool even BEFORE open */
1643 netif_carrier_off(netdev);
1644
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001645#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001646 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001647 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001648 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001649 igb_setup_dca(adapter);
1650 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001651
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001652#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001653 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1654 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001655 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001656 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00001657 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1658 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001659 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1660 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1661 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1662 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001663 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001664
1665 igb_read_part_num(hw, &part_num);
1666 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1667 (part_num >> 8), (part_num & 0xff));
1668
1669 dev_info(&pdev->dev,
1670 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1671 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001672 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001673 adapter->num_rx_queues, adapter->num_tx_queues);
1674
Auke Kok9d5c8242008-01-24 02:22:38 -08001675 return 0;
1676
1677err_register:
1678 igb_release_hw_control(adapter);
1679err_eeprom:
1680 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001681 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001682
1683 if (hw->flash_address)
1684 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001685err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001686 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001687 iounmap(hw->hw_addr);
1688err_ioremap:
1689 free_netdev(netdev);
1690err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00001691 pci_release_selected_regions(pdev,
1692 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001693err_pci_reg:
1694err_dma:
1695 pci_disable_device(pdev);
1696 return err;
1697}
1698
1699/**
1700 * igb_remove - Device Removal Routine
1701 * @pdev: PCI device information struct
1702 *
1703 * igb_remove is called by the PCI subsystem to alert the driver
1704 * that it should release a PCI device. This could be caused by a
1705 * Hot-Plug event, or because the driver is going to be removed from
1706 * memory.
1707 **/
1708static void __devexit igb_remove(struct pci_dev *pdev)
1709{
1710 struct net_device *netdev = pci_get_drvdata(pdev);
1711 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001712 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001713
1714	/* flush_scheduled_work() may reschedule our watchdog task, so
1715 * explicitly disable watchdog tasks from being rescheduled */
1716 set_bit(__IGB_DOWN, &adapter->state);
1717 del_timer_sync(&adapter->watchdog_timer);
1718 del_timer_sync(&adapter->phy_info_timer);
1719
1720 flush_scheduled_work();
1721
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001722#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001723 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001724 dev_info(&pdev->dev, "DCA disabled\n");
1725 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001726 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001727 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001728 }
1729#endif
1730
Auke Kok9d5c8242008-01-24 02:22:38 -08001731 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1732 * would have already happened in close and is redundant. */
1733 igb_release_hw_control(adapter);
1734
1735 unregister_netdev(netdev);
1736
Alexander Duyck047e0032009-10-27 15:49:27 +00001737 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001738
Alexander Duyck37680112009-02-19 20:40:30 -08001739#ifdef CONFIG_PCI_IOV
1740 /* reclaim resources allocated to VFs */
1741 if (adapter->vf_data) {
1742 /* disable iov and allow time for transactions to clear */
1743 pci_disable_sriov(pdev);
1744 msleep(500);
1745
1746 kfree(adapter->vf_data);
1747 adapter->vf_data = NULL;
1748 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1749 msleep(100);
1750 dev_info(&pdev->dev, "IOV Disabled\n");
1751 }
1752#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00001753
Alexander Duyck28b07592009-02-06 23:20:31 +00001754 iounmap(hw->hw_addr);
1755 if (hw->flash_address)
1756 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00001757 pci_release_selected_regions(pdev,
1758 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001759
1760 free_netdev(netdev);
1761
Frans Pop19d5afd2009-10-02 10:04:12 -07001762 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001763
Auke Kok9d5c8242008-01-24 02:22:38 -08001764 pci_disable_device(pdev);
1765}
1766
1767/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00001768 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1769 * @adapter: board private structure to initialize
1770 *
1771 * This function initializes the vf specific data storage and then attempts to
1772 * allocate the VFs. The reason for ordering it this way is because it is much
1773 * mor expensive time wise to disable SR-IOV than it is to allocate and free
1774 * the memory for the VFs.
1775 **/
1776static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
1777{
1778#ifdef CONFIG_PCI_IOV
1779 struct pci_dev *pdev = adapter->pdev;
1780
1781 if (adapter->vfs_allocated_count > 7)
1782 adapter->vfs_allocated_count = 7;
1783
1784 if (adapter->vfs_allocated_count) {
1785 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1786 sizeof(struct vf_data_storage),
1787 GFP_KERNEL);
1788 /* if allocation failed then we do not support SR-IOV */
1789 if (!adapter->vf_data) {
1790 adapter->vfs_allocated_count = 0;
1791 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1792 "Data Storage\n");
1793 }
1794 }
1795
1796 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1797 kfree(adapter->vf_data);
1798 adapter->vf_data = NULL;
1799#endif /* CONFIG_PCI_IOV */
1800 adapter->vfs_allocated_count = 0;
1801#ifdef CONFIG_PCI_IOV
1802 } else {
1803 unsigned char mac_addr[ETH_ALEN];
1804 int i;
1805 dev_info(&pdev->dev, "%d vfs allocated\n",
1806 adapter->vfs_allocated_count);
1807 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1808 random_ether_addr(mac_addr);
1809 igb_set_vf_mac(adapter, i, mac_addr);
1810 }
1811 }
1812#endif /* CONFIG_PCI_IOV */
1813}
1814
Alexander Duyck115f4592009-11-12 18:37:00 +00001815
1816/**
1817 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1818 * @adapter: board private structure to initialize
1819 *
1820 * igb_init_hw_timer initializes the function pointer and values for the hw
1821 * timer found in hardware.
1822 **/
1823static void igb_init_hw_timer(struct igb_adapter *adapter)
1824{
1825 struct e1000_hw *hw = &adapter->hw;
1826
1827 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00001828 case e1000_82580:
1829 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1830 adapter->cycles.read = igb_read_clock;
1831 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1832 adapter->cycles.mult = 1;
1833 /*
1834		 * The 82580 timesync updates the system timer by 8ns every 8ns,
1835		 * and that value cannot be shifted. Instead we need to shift
1836 * the registers to generate a 64bit timer value. As a result
1837 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
1838 * 24 in order to generate a larger value for synchronization.
1839 */
1840 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
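		/* Sketch of the resulting conversion (not in the original
		 * source): the timecounter computes ns = (cycles * mult) >>
		 * shift, so with mult == 1 and a shift of 24 the extra 24
		 * bits introduced by the shifted register reads are divided
		 * back out, i.e. ns = cycles >> 24. */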
1841 /* disable system timer temporarily by setting bit 31 */
1842 wr32(E1000_TSAUXC, 0x80000000);
1843 wrfl();
1844
1845 /* Set registers so that rollover occurs soon to test this. */
1846 wr32(E1000_SYSTIMR, 0x00000000);
1847 wr32(E1000_SYSTIML, 0x80000000);
1848 wr32(E1000_SYSTIMH, 0x000000FF);
1849 wrfl();
1850
1851 /* enable system timer by clearing bit 31 */
1852 wr32(E1000_TSAUXC, 0x0);
1853 wrfl();
1854
1855 timecounter_init(&adapter->clock,
1856 &adapter->cycles,
1857 ktime_to_ns(ktime_get_real()));
1858 /*
1859 * Synchronize our NIC clock against system wall clock. NIC
1860		 * time stamp reading requires ~3us per sample, and each sample
1861		 * was pretty stable even under load, so we only require 10
1862 * samples for each offset comparison.
1863 */
1864 memset(&adapter->compare, 0, sizeof(adapter->compare));
1865 adapter->compare.source = &adapter->clock;
1866 adapter->compare.target = ktime_get_real;
1867 adapter->compare.num_samples = 10;
1868 timecompare_update(&adapter->compare, 0);
1869 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00001870 case e1000_82576:
1871 /*
1872 * Initialize hardware timer: we keep it running just in case
1873 * that some program needs it later on.
1874 */
1875 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1876 adapter->cycles.read = igb_read_clock;
1877 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1878 adapter->cycles.mult = 1;
1879		/*
1880		 * Scale the NIC clock cycle by a large factor so that
1881		 * relatively small clock corrections can be added or
1882		 * subtracted at each clock tick. The drawbacks of a large
1883 * factor are a) that the clock register overflows more quickly
1884 * (not such a big deal) and b) that the increment per tick has
1885 * to fit into 24 bits. As a result we need to use a shift of
1886 * 19 so we can fit a value of 16 into the TIMINCA register.
1887 */
1888 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1889 wr32(E1000_TIMINCA,
1890 (1 << E1000_TIMINCA_16NS_SHIFT) |
1891 (16 << IGB_82576_TSYNC_SHIFT));
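		/* Worked numbers (not in the original source), assuming the
		 * 16NS field selects a 16ns update period: SYSTIM advances by
		 * 16 << 19 every 16ns and the timecounter undoes the shift
		 * with ns = cycles >> 19, so the clock tracks wall time 1:1;
		 * a +/-1 change to the 24-bit increment adjusts the rate by
		 * 1 / (16 << 19) ~= 0.12 ppm. */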
1892
1893 /* Set registers so that rollover occurs soon to test this. */
1894 wr32(E1000_SYSTIML, 0x00000000);
1895 wr32(E1000_SYSTIMH, 0xFF800000);
1896 wrfl();
1897
1898 timecounter_init(&adapter->clock,
1899 &adapter->cycles,
1900 ktime_to_ns(ktime_get_real()));
1901 /*
1902 * Synchronize our NIC clock against system wall clock. NIC
1903		 * time stamp reading requires ~3us per sample, and each sample
1904		 * was pretty stable even under load, so we only require 10
1905 * samples for each offset comparison.
1906 */
1907 memset(&adapter->compare, 0, sizeof(adapter->compare));
1908 adapter->compare.source = &adapter->clock;
1909 adapter->compare.target = ktime_get_real;
1910 adapter->compare.num_samples = 10;
1911 timecompare_update(&adapter->compare, 0);
1912 break;
1913 case e1000_82575:
1914 /* 82575 does not support timesync */
1915 default:
1916 break;
1917 }
1918
1919}
1920
Alexander Duycka6b623e2009-10-27 23:47:53 +00001921/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001922 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1923 * @adapter: board private structure to initialize
1924 *
1925 * igb_sw_init initializes the Adapter private data structure.
1926 * Fields are initialized based on PCI device information and
1927 * OS network device settings (MTU size).
1928 **/
1929static int __devinit igb_sw_init(struct igb_adapter *adapter)
1930{
1931 struct e1000_hw *hw = &adapter->hw;
1932 struct net_device *netdev = adapter->netdev;
1933 struct pci_dev *pdev = adapter->pdev;
1934
1935 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1936
Alexander Duyck68fd9912008-11-20 00:48:10 -08001937 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1938 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001939 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1940 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1941
Auke Kok9d5c8242008-01-24 02:22:38 -08001942 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1943 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1944
Alexander Duycka6b623e2009-10-27 23:47:53 +00001945#ifdef CONFIG_PCI_IOV
1946 if (hw->mac.type == e1000_82576)
1947 adapter->vfs_allocated_count = max_vfs;
1948
1949#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00001950 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1951
1952 /*
1953	 * if rss_queues > 4, or if more than 6 VFs are allocated while more
1954	 * than one rss queue is in use, combine the queues into queue pairs
1955	 * in order to conserve the limited supply of interrupt vectors
1956 */
1957 if ((adapter->rss_queues > 4) ||
1958 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1959 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
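	/* With IGB_FLAG_QUEUE_PAIRS set, each q_vector services one Tx and
	 * one Rx ring instead of a single ring, roughly halving the number
	 * of MSI-X vectors the interrupt scheme below will request. */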
1960
Alexander Duycka6b623e2009-10-27 23:47:53 +00001961 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00001962 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001963 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1964 return -ENOMEM;
1965 }
1966
Alexander Duyck115f4592009-11-12 18:37:00 +00001967 igb_init_hw_timer(adapter);
Alexander Duycka6b623e2009-10-27 23:47:53 +00001968 igb_probe_vfs(adapter);
1969
Auke Kok9d5c8242008-01-24 02:22:38 -08001970 /* Explicitly disable IRQ since the NIC can be in any state. */
1971 igb_irq_disable(adapter);
1972
1973 set_bit(__IGB_DOWN, &adapter->state);
1974 return 0;
1975}
1976
1977/**
1978 * igb_open - Called when a network interface is made active
1979 * @netdev: network interface device structure
1980 *
1981 * Returns 0 on success, negative value on failure
1982 *
1983 * The open entry point is called when a network interface is made
1984 * active by the system (IFF_UP). At this point all resources needed
1985 * for transmit and receive operations are allocated, the interrupt
1986 * handler is registered with the OS, the watchdog timer is started,
1987 * and the stack is notified that the interface is ready.
1988 **/
1989static int igb_open(struct net_device *netdev)
1990{
1991 struct igb_adapter *adapter = netdev_priv(netdev);
1992 struct e1000_hw *hw = &adapter->hw;
1993 int err;
1994 int i;
1995
1996 /* disallow open during test */
1997 if (test_bit(__IGB_TESTING, &adapter->state))
1998 return -EBUSY;
1999
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002000 netif_carrier_off(netdev);
2001
Auke Kok9d5c8242008-01-24 02:22:38 -08002002 /* allocate transmit descriptors */
2003 err = igb_setup_all_tx_resources(adapter);
2004 if (err)
2005 goto err_setup_tx;
2006
2007 /* allocate receive descriptors */
2008 err = igb_setup_all_rx_resources(adapter);
2009 if (err)
2010 goto err_setup_rx;
2011
Nick Nunley88a268c2010-02-17 01:01:59 +00002012 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002013
Auke Kok9d5c8242008-01-24 02:22:38 -08002014 /* before we allocate an interrupt, we must be ready to handle it.
2015 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2016	 * as soon as we call pci_request_irq, so we have to set up our
2017 * clean_rx handler before we do so. */
2018 igb_configure(adapter);
2019
2020 err = igb_request_irq(adapter);
2021 if (err)
2022 goto err_req_irq;
2023
2024 /* From here on the code is the same as igb_up() */
2025 clear_bit(__IGB_DOWN, &adapter->state);
2026
Alexander Duyck047e0032009-10-27 15:49:27 +00002027 for (i = 0; i < adapter->num_q_vectors; i++) {
2028 struct igb_q_vector *q_vector = adapter->q_vector[i];
2029 napi_enable(&q_vector->napi);
2030 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002031
2032 /* Clear any pending interrupts. */
2033 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002034
2035 igb_irq_enable(adapter);
2036
Alexander Duyckd4960302009-10-27 15:53:45 +00002037 /* notify VFs that reset has been completed */
2038 if (adapter->vfs_allocated_count) {
2039 u32 reg_data = rd32(E1000_CTRL_EXT);
2040 reg_data |= E1000_CTRL_EXT_PFRSTD;
2041 wr32(E1000_CTRL_EXT, reg_data);
2042 }
2043
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002044 netif_tx_start_all_queues(netdev);
2045
Alexander Duyck25568a52009-10-27 23:49:59 +00002046 /* start the watchdog. */
2047 hw->mac.get_link_status = 1;
2048 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002049
2050 return 0;
2051
2052err_req_irq:
2053 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002054 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002055 igb_free_all_rx_resources(adapter);
2056err_setup_rx:
2057 igb_free_all_tx_resources(adapter);
2058err_setup_tx:
2059 igb_reset(adapter);
2060
2061 return err;
2062}
2063
2064/**
2065 * igb_close - Disables a network interface
2066 * @netdev: network interface device structure
2067 *
2068 * Returns 0, this is not allowed to fail
2069 *
2070 * The close entry point is called when an interface is de-activated
2071 * by the OS. The hardware is still under the driver's control, but
2072 * needs to be disabled. A global MAC reset is issued to stop the
2073 * hardware, and all transmit and receive resources are freed.
2074 **/
2075static int igb_close(struct net_device *netdev)
2076{
2077 struct igb_adapter *adapter = netdev_priv(netdev);
2078
2079 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2080 igb_down(adapter);
2081
2082 igb_free_irq(adapter);
2083
2084 igb_free_all_tx_resources(adapter);
2085 igb_free_all_rx_resources(adapter);
2086
Auke Kok9d5c8242008-01-24 02:22:38 -08002087 return 0;
2088}
2089
2090/**
2091 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002092 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2093 *
2094 * Return 0 on success, negative on failure
2095 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002096int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002097{
Alexander Duyck80785292009-10-27 15:51:47 +00002098 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002099 int size;
2100
2101 size = sizeof(struct igb_buffer) * tx_ring->count;
2102 tx_ring->buffer_info = vmalloc(size);
2103 if (!tx_ring->buffer_info)
2104 goto err;
2105 memset(tx_ring->buffer_info, 0, size);
2106
2107 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002108 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002109 tx_ring->size = ALIGN(tx_ring->size, 4096);
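	/* e.g. (illustrative, not in the original source): 512 descriptors
	 * x 16 bytes = 8192, already 4K-aligned; 320 descriptors give 5120
	 * bytes, rounded up to 8192. */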
2110
Alexander Duyck439705e2009-10-27 23:49:20 +00002111 tx_ring->desc = pci_alloc_consistent(pdev,
2112 tx_ring->size,
Auke Kok9d5c8242008-01-24 02:22:38 -08002113 &tx_ring->dma);
2114
2115 if (!tx_ring->desc)
2116 goto err;
2117
Auke Kok9d5c8242008-01-24 02:22:38 -08002118 tx_ring->next_to_use = 0;
2119 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002120 return 0;
2121
2122err:
2123 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00002124 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002125 "Unable to allocate memory for the transmit descriptor ring\n");
2126 return -ENOMEM;
2127}
2128
2129/**
2130 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2131 * (Descriptors) for all queues
2132 * @adapter: board private structure
2133 *
2134 * Return 0 on success, negative on failure
2135 **/
2136static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2137{
Alexander Duyck439705e2009-10-27 23:49:20 +00002138 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002139 int i, err = 0;
2140
2141 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002142 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002143 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002144 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002145 "Allocation for Tx Queue %u failed\n", i);
2146 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002147 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002148 break;
2149 }
2150 }
2151
Alexander Duycka99955f2009-11-12 18:37:19 +00002152 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002153 int r_idx = i % adapter->num_tx_queues;
Alexander Duyck3025a442010-02-17 01:02:39 +00002154 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002155 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002156 return err;
2157}
2158
2159/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002160 * igb_setup_tctl - configure the transmit control registers
2161 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002162 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002163void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002164{
Auke Kok9d5c8242008-01-24 02:22:38 -08002165 struct e1000_hw *hw = &adapter->hw;
2166 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002167
Alexander Duyck85b430b2009-10-27 15:50:29 +00002168 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2169 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002170
2171 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002172 tctl = rd32(E1000_TCTL);
2173 tctl &= ~E1000_TCTL_CT;
2174 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2175 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2176
2177 igb_config_collision_dist(hw);
2178
Auke Kok9d5c8242008-01-24 02:22:38 -08002179 /* Enable transmits */
2180 tctl |= E1000_TCTL_EN;
2181
2182 wr32(E1000_TCTL, tctl);
2183}
2184
2185/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002186 * igb_configure_tx_ring - Configure transmit ring after Reset
2187 * @adapter: board private structure
2188 * @ring: tx ring to configure
2189 *
2190 * Configure a transmit ring after a reset.
2191 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002192void igb_configure_tx_ring(struct igb_adapter *adapter,
2193 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002194{
2195 struct e1000_hw *hw = &adapter->hw;
2196 u32 txdctl;
2197 u64 tdba = ring->dma;
2198 int reg_idx = ring->reg_idx;
2199
2200 /* disable the queue */
2201 txdctl = rd32(E1000_TXDCTL(reg_idx));
2202 wr32(E1000_TXDCTL(reg_idx),
2203 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2204 wrfl();
2205 mdelay(10);
2206
2207 wr32(E1000_TDLEN(reg_idx),
2208 ring->count * sizeof(union e1000_adv_tx_desc));
2209 wr32(E1000_TDBAL(reg_idx),
2210 tdba & 0x00000000ffffffffULL);
2211 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2212
Alexander Duyckfce99e32009-10-27 15:51:27 +00002213 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2214 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2215 writel(0, ring->head);
2216 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002217
2218 txdctl |= IGB_TX_PTHRESH;
2219 txdctl |= IGB_TX_HTHRESH << 8;
2220 txdctl |= IGB_TX_WTHRESH << 16;
2221
2222 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2223 wr32(E1000_TXDCTL(reg_idx), txdctl);
2224}
2225
2226/**
2227 * igb_configure_tx - Configure transmit Unit after Reset
2228 * @adapter: board private structure
2229 *
2230 * Configure the Tx unit of the MAC after a reset.
2231 **/
2232static void igb_configure_tx(struct igb_adapter *adapter)
2233{
2234 int i;
2235
2236 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002237 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002238}
2239
2240/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002241 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002242 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2243 *
2244 * Returns 0 on success, negative on failure
2245 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002246int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002247{
Alexander Duyck80785292009-10-27 15:51:47 +00002248 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002249 int size, desc_len;
2250
2251 size = sizeof(struct igb_buffer) * rx_ring->count;
2252 rx_ring->buffer_info = vmalloc(size);
2253 if (!rx_ring->buffer_info)
2254 goto err;
2255 memset(rx_ring->buffer_info, 0, size);
2256
2257 desc_len = sizeof(union e1000_adv_rx_desc);
2258
2259 /* Round up to nearest 4K */
2260 rx_ring->size = rx_ring->count * desc_len;
2261 rx_ring->size = ALIGN(rx_ring->size, 4096);
2262
2263 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2264 &rx_ring->dma);
2265
2266 if (!rx_ring->desc)
2267 goto err;
2268
2269 rx_ring->next_to_clean = 0;
2270 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002271
Auke Kok9d5c8242008-01-24 02:22:38 -08002272 return 0;
2273
2274err:
2275 vfree(rx_ring->buffer_info);
Alexander Duyck439705e2009-10-27 23:49:20 +00002276 rx_ring->buffer_info = NULL;
Alexander Duyck80785292009-10-27 15:51:47 +00002277 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002278 "the receive descriptor ring\n");
2279 return -ENOMEM;
2280}
2281
2282/**
2283 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2284 * (Descriptors) for all queues
2285 * @adapter: board private structure
2286 *
2287 * Return 0 on success, negative on failure
2288 **/
2289static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2290{
Alexander Duyck439705e2009-10-27 23:49:20 +00002291 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002292 int i, err = 0;
2293
2294 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002295 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002296 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002297 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002298 "Allocation for Rx Queue %u failed\n", i);
2299 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002300 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002301 break;
2302 }
2303 }
2304
2305 return err;
2306}
2307
2308/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002309 * igb_setup_mrqc - configure the multiple receive queue control registers
2310 * @adapter: Board private structure
2311 **/
2312static void igb_setup_mrqc(struct igb_adapter *adapter)
2313{
2314 struct e1000_hw *hw = &adapter->hw;
2315 u32 mrqc, rxcsum;
2316 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2317 union e1000_reta {
2318 u32 dword;
2319 u8 bytes[4];
2320 } reta;
2321 static const u8 rsshash[40] = {
2322 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2323 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2324 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2325 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2326
2327 /* Fill out hash function seeds */
2328 for (j = 0; j < 10; j++) {
2329 u32 rsskey = rsshash[(j * 4)];
2330 rsskey |= rsshash[(j * 4) + 1] << 8;
2331 rsskey |= rsshash[(j * 4) + 2] << 16;
2332 rsskey |= rsshash[(j * 4) + 3] << 24;
2333 array_wr32(E1000_RSSRK(0), j, rsskey);
2334 }
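	/* Example (not in the original source): for j == 0 the first four
	 * key bytes pack as
	 * rsskey = 0x6d | 0x5a << 8 | 0x56 << 16 | 0xda << 24 = 0xda565a6d. */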
2335
Alexander Duycka99955f2009-11-12 18:37:19 +00002336 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002337
2338 if (adapter->vfs_allocated_count) {
2339 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2340 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00002341 case e1000_82580:
2342 num_rx_queues = 1;
2343 shift = 0;
2344 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002345 case e1000_82576:
2346 shift = 3;
2347 num_rx_queues = 2;
2348 break;
2349 case e1000_82575:
2350 shift = 2;
2351 shift2 = 6;
2352 default:
2353 break;
2354 }
2355 } else {
2356 if (hw->mac.type == e1000_82575)
2357 shift = 6;
2358 }
2359
2360 for (j = 0; j < (32 * 4); j++) {
2361 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2362 if (shift2)
2363 reta.bytes[j & 3] |= num_rx_queues << shift2;
2364 if ((j & 3) == 3)
2365 wr32(E1000_RETA(j >> 2), reta.dword);
2366 }
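	/* Example (not in the original source): on an 82576 with VFs enabled
	 * (num_rx_queues = 2, shift = 3, shift2 = 0) the bytes alternate
	 * 0x00 and 0x08, so on a little-endian host every RETA dword
	 * written is 0x08000800. */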
2367
2368 /*
2369 * Disable raw packet checksumming so that RSS hash is placed in
2370 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2371 * offloads as they are enabled by default
2372 */
2373 rxcsum = rd32(E1000_RXCSUM);
2374 rxcsum |= E1000_RXCSUM_PCSD;
2375
2376 if (adapter->hw.mac.type >= e1000_82576)
2377 /* Enable Receive Checksum Offload for SCTP */
2378 rxcsum |= E1000_RXCSUM_CRCOFL;
2379
2380 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2381 wr32(E1000_RXCSUM, rxcsum);
2382
2383 /* If VMDq is enabled then we set the appropriate mode for that, else
2384 * we default to RSS so that an RSS hash is calculated per packet even
2385 * if we are only using one queue */
2386 if (adapter->vfs_allocated_count) {
2387 if (hw->mac.type > e1000_82575) {
2388 /* Set the default pool for the PF's first queue */
2389 u32 vtctl = rd32(E1000_VT_CTL);
2390 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2391 E1000_VT_CTL_DISABLE_DEF_POOL);
2392 vtctl |= adapter->vfs_allocated_count <<
2393 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2394 wr32(E1000_VT_CTL, vtctl);
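			/* Example (not in the original source): with 2 VFs
			 * the PF's default pool is pool 2, since the VF
			 * pools occupy entries 0 and 1. */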
2395 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002396 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002397 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2398 else
2399 mrqc = E1000_MRQC_ENABLE_VMDQ;
2400 } else {
2401 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2402 }
2403 igb_vmm_control(adapter);
2404
2405 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2406 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2407 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2408 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2409 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2410 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2411 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2412 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2413
2414 wr32(E1000_MRQC, mrqc);
2415}
2416
2417/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002418 * igb_setup_rctl - configure the receive control registers
2419 * @adapter: Board private structure
2420 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002421void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002422{
2423 struct e1000_hw *hw = &adapter->hw;
2424 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002425
2426 rctl = rd32(E1000_RCTL);
2427
2428 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002429 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002430
Alexander Duyck69d728b2008-11-25 01:04:03 -08002431 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002432 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002433
Auke Kok87cb7e82008-07-08 15:08:29 -07002434 /*
2435 * enable stripping of CRC. It's unlikely this will break BMC
2436 * redirection as it did with e1000. Newer features require
2437 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002438 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002439 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002440
Alexander Duyck559e9c42009-10-27 23:52:50 +00002441 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002442 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002443
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002444 /* enable LPE to prevent packets larger than max_frame_size */
2445 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002446
Alexander Duyck952f72a2009-10-27 15:51:07 +00002447 /* disable queue 0 to prevent tail write w/o re-config */
2448 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002449
Alexander Duycke1739522009-02-19 20:39:44 -08002450 /* Attention!!! For SR-IOV PF driver operations you must enable
2451 * queue drop for all VF and PF queues to prevent head of line blocking
2452	 * if an untrusted VF does not provide descriptors to hardware.
2453 */
2454 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002455 /* set all queue drop enable bits */
2456 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002457 }
2458
Auke Kok9d5c8242008-01-24 02:22:38 -08002459 wr32(E1000_RCTL, rctl);
2460}
2461
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002462static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2463 int vfn)
2464{
2465 struct e1000_hw *hw = &adapter->hw;
2466 u32 vmolr;
2467
2468	/* if it isn't the PF, check whether that VF has VLANs enabled and
2469 * increase the size to support vlan tags */
2470 if (vfn < adapter->vfs_allocated_count &&
2471 adapter->vf_data[vfn].vlans_enabled)
2472 size += VLAN_TAG_SIZE;
2473
2474 vmolr = rd32(E1000_VMOLR(vfn));
2475 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2476 vmolr |= size | E1000_VMOLR_LPE;
2477 wr32(E1000_VMOLR(vfn), vmolr);
2478
2479 return 0;
2480}
2481
Auke Kok9d5c8242008-01-24 02:22:38 -08002482/**
Alexander Duycke1739522009-02-19 20:39:44 -08002483 * igb_rlpml_set - set maximum receive packet size
2484 * @adapter: board private structure
2485 *
2486 * Configure maximum receivable packet size.
2487 **/
2488static void igb_rlpml_set(struct igb_adapter *adapter)
2489{
2490 u32 max_frame_size = adapter->max_frame_size;
2491 struct e1000_hw *hw = &adapter->hw;
2492 u16 pf_id = adapter->vfs_allocated_count;
2493
2494 if (adapter->vlgrp)
2495 max_frame_size += VLAN_TAG_SIZE;
2496
2497 /* if vfs are enabled we set RLPML to the largest possible request
2498 * size and set the VMOLR RLPML to the size we need */
2499 if (pf_id) {
2500 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002501 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002502 }
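	/* Example (not in the original source): with 2 VFs and a 1500-byte
	 * MTU the PF's pool is clamped via VMOLR(2).RLPML to 1518 bytes
	 * (plus VLAN_TAG_SIZE if a vlan group is registered), while the
	 * shared RLPML register is opened up to MAX_JUMBO_FRAME_SIZE so the
	 * per-pool limits govern instead. */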
2503
2504 wr32(E1000_RLPML, max_frame_size);
2505}
2506
Williams, Mitch A8151d292010-02-10 01:44:24 +00002507static inline void igb_set_vmolr(struct igb_adapter *adapter,
2508 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002509{
2510 struct e1000_hw *hw = &adapter->hw;
2511 u32 vmolr;
2512
2513 /*
2514	 * This register exists only on 82576 and newer, so on older MACs
2515	 * we should exit and do nothing
2516 */
2517 if (hw->mac.type < e1000_82576)
2518 return;
2519
2520 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00002521 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2522 if (aupe)
2523 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2524 else
2525 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002526
2527 /* clear all bits that might not be set */
2528 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2529
Alexander Duycka99955f2009-11-12 18:37:19 +00002530 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002531 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2532 /*
2533 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2534 * multicast packets
2535 */
2536 if (vfn <= adapter->vfs_allocated_count)
2537 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2538
2539 wr32(E1000_VMOLR(vfn), vmolr);
2540}
2541
Alexander Duycke1739522009-02-19 20:39:44 -08002542/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002543 * igb_configure_rx_ring - Configure a receive ring after Reset
2544 * @adapter: board private structure
2545 * @ring: receive ring to be configured
2546 *
2547 * Configure the Rx unit of the MAC after a reset.
2548 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002549void igb_configure_rx_ring(struct igb_adapter *adapter,
2550 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002551{
2552 struct e1000_hw *hw = &adapter->hw;
2553 u64 rdba = ring->dma;
2554 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002555 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002556
2557 /* disable the queue */
2558 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2559 wr32(E1000_RXDCTL(reg_idx),
2560 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2561
2562 /* Set DMA base address registers */
2563 wr32(E1000_RDBAL(reg_idx),
2564 rdba & 0x00000000ffffffffULL);
2565 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2566 wr32(E1000_RDLEN(reg_idx),
2567 ring->count * sizeof(union e1000_adv_rx_desc));
2568
2569 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002570 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2571 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2572 writel(0, ring->head);
2573 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002574
Alexander Duyck952f72a2009-10-27 15:51:07 +00002575 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002576 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2577 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002578 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2579#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2580 srrctl |= IGB_RXBUFFER_16384 >>
2581 E1000_SRRCTL_BSIZEPKT_SHIFT;
2582#else
2583 srrctl |= (PAGE_SIZE / 2) >>
2584 E1000_SRRCTL_BSIZEPKT_SHIFT;
2585#endif
2586 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2587 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002588 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002589 E1000_SRRCTL_BSIZEPKT_SHIFT;
2590 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2591 }
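	/* Illustrative encoding (not in the original source): in the
	 * one-buffer path with rx_buffer_len = 2048, the field written is
	 * 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT = 2, i.e. the packet buffer
	 * size is expressed in 1KB units. */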
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00002592 /* Only set Drop Enable if we are supporting multiple queues */
2593 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2594 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002595
2596 wr32(E1000_SRRCTL(reg_idx), srrctl);
2597
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002598 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00002599 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002600
Alexander Duyck85b430b2009-10-27 15:50:29 +00002601 /* enable receive descriptor fetching */
2602 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2603 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2604 rxdctl &= 0xFFF00000;
2605 rxdctl |= IGB_RX_PTHRESH;
2606 rxdctl |= IGB_RX_HTHRESH << 8;
2607 rxdctl |= IGB_RX_WTHRESH << 16;
2608 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2609}
2610
2611/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002612 * igb_configure_rx - Configure receive Unit after Reset
2613 * @adapter: board private structure
2614 *
2615 * Configure the Rx unit of the MAC after a reset.
2616 **/
2617static void igb_configure_rx(struct igb_adapter *adapter)
2618{
Hannes Eder91075842009-02-18 19:36:04 -08002619 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002620
Alexander Duyck68d480c2009-10-05 06:33:08 +00002621 /* set UTA to appropriate mode */
2622 igb_set_uta(adapter);
2623
Alexander Duyck26ad9172009-10-05 06:32:49 +00002624 /* set the correct pool for the PF default MAC address in entry 0 */
2625 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2626 adapter->vfs_allocated_count);
2627
Alexander Duyck06cf2662009-10-27 15:53:25 +00002628 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2629 * the Base and Length of the Rx Descriptor Ring */
2630 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002631 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002632}
2633
2634/**
2635 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002636 * @tx_ring: Tx descriptor ring for a specific queue
2637 *
2638 * Free all transmit software resources
2639 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002640void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002641{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002642 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002643
2644 vfree(tx_ring->buffer_info);
2645 tx_ring->buffer_info = NULL;
2646
Alexander Duyck439705e2009-10-27 23:49:20 +00002647 /* if not set, then don't free */
2648 if (!tx_ring->desc)
2649 return;
2650
Alexander Duyck80785292009-10-27 15:51:47 +00002651 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2652 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002653
2654 tx_ring->desc = NULL;
2655}
2656
2657/**
2658 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2659 * @adapter: board private structure
2660 *
2661 * Free all transmit software resources
2662 **/
2663static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2664{
2665 int i;
2666
2667 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002668 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002669}
2670
Alexander Duyckb1a436c2009-10-27 15:54:43 +00002671void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2672 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002673{
Alexander Duyck6366ad32009-12-02 16:47:18 +00002674 if (buffer_info->dma) {
2675 if (buffer_info->mapped_as_page)
2676 pci_unmap_page(tx_ring->pdev,
2677 buffer_info->dma,
2678 buffer_info->length,
2679 PCI_DMA_TODEVICE);
2680 else
2681 pci_unmap_single(tx_ring->pdev,
2682 buffer_info->dma,
2683 buffer_info->length,
2684 PCI_DMA_TODEVICE);
2685 buffer_info->dma = 0;
2686 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002687 if (buffer_info->skb) {
2688 dev_kfree_skb_any(buffer_info->skb);
2689 buffer_info->skb = NULL;
2690 }
2691 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00002692 buffer_info->length = 0;
2693 buffer_info->next_to_watch = 0;
2694 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002695}
2696
2697/**
2698 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002699 * @tx_ring: ring to be cleaned
2700 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002701static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002702{
2703 struct igb_buffer *buffer_info;
2704 unsigned long size;
2705 unsigned int i;
2706
2707 if (!tx_ring->buffer_info)
2708 return;
2709 /* Free all the Tx ring sk_buffs */
2710
2711 for (i = 0; i < tx_ring->count; i++) {
2712 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002713 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002714 }
2715
2716 size = sizeof(struct igb_buffer) * tx_ring->count;
2717 memset(tx_ring->buffer_info, 0, size);
2718
2719 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08002720 memset(tx_ring->desc, 0, tx_ring->size);
2721
2722 tx_ring->next_to_use = 0;
2723 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002724}
2725
2726/**
2727 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2728 * @adapter: board private structure
2729 **/
2730static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2731{
2732 int i;
2733
2734 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002735 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002736}
2737
2738/**
2739 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002740 * @rx_ring: ring to clean the resources from
2741 *
2742 * Free all receive software resources
2743 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002744void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002745{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002746 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002747
2748 vfree(rx_ring->buffer_info);
2749 rx_ring->buffer_info = NULL;
2750
Alexander Duyck439705e2009-10-27 23:49:20 +00002751 /* if not set, then don't free */
2752 if (!rx_ring->desc)
2753 return;
2754
Alexander Duyck80785292009-10-27 15:51:47 +00002755 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2756 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002757
2758 rx_ring->desc = NULL;
2759}
2760
2761/**
2762 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2763 * @adapter: board private structure
2764 *
2765 * Free all receive software resources
2766 **/
2767static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2768{
2769 int i;
2770
2771 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002772 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002773}
2774
2775/**
2776 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002777 * @rx_ring: ring to free buffers from
2778 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002779static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002780{
2781 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002782 unsigned long size;
2783 unsigned int i;
2784
2785 if (!rx_ring->buffer_info)
2786 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00002787
Auke Kok9d5c8242008-01-24 02:22:38 -08002788 /* Free all the Rx ring sk_buffs */
2789 for (i = 0; i < rx_ring->count; i++) {
2790 buffer_info = &rx_ring->buffer_info[i];
2791 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002792 pci_unmap_single(rx_ring->pdev,
2793 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002794 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002795 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002796 buffer_info->dma = 0;
2797 }
2798
2799 if (buffer_info->skb) {
2800 dev_kfree_skb(buffer_info->skb);
2801 buffer_info->skb = NULL;
2802 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002803 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002804 pci_unmap_page(rx_ring->pdev,
2805 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002806 PAGE_SIZE / 2,
2807 PCI_DMA_FROMDEVICE);
2808 buffer_info->page_dma = 0;
2809 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002810 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002811 put_page(buffer_info->page);
2812 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002813 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002814 }
2815 }
2816
Auke Kok9d5c8242008-01-24 02:22:38 -08002817 size = sizeof(struct igb_buffer) * rx_ring->count;
2818 memset(rx_ring->buffer_info, 0, size);
2819
2820 /* Zero out the descriptor ring */
2821 memset(rx_ring->desc, 0, rx_ring->size);
2822
2823 rx_ring->next_to_clean = 0;
2824 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002825}
2826
2827/**
2828 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2829 * @adapter: board private structure
2830 **/
2831static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2832{
2833 int i;
2834
2835 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002836 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002837}
2838
2839/**
2840 * igb_set_mac - Change the Ethernet Address of the NIC
2841 * @netdev: network interface device structure
2842 * @p: pointer to an address structure
2843 *
2844 * Returns 0 on success, negative on failure
2845 **/
2846static int igb_set_mac(struct net_device *netdev, void *p)
2847{
2848 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002849 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002850 struct sockaddr *addr = p;
2851
2852 if (!is_valid_ether_addr(addr->sa_data))
2853 return -EADDRNOTAVAIL;
2854
2855 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002856 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002857
Alexander Duyck26ad9172009-10-05 06:32:49 +00002858 /* set the correct pool for the new PF MAC address in entry 0 */
2859 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2860 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002861
Auke Kok9d5c8242008-01-24 02:22:38 -08002862 return 0;
2863}
2864
2865/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002866 * igb_write_mc_addr_list - write multicast addresses to MTA
2867 * @netdev: network interface device structure
2868 *
2869 * Writes multicast address list to the MTA hash table.
2870 * Returns: -ENOMEM on failure
2871 * 0 on no addresses written
2872 * X on writing X addresses to MTA
2873 **/
2874static int igb_write_mc_addr_list(struct net_device *netdev)
2875{
2876 struct igb_adapter *adapter = netdev_priv(netdev);
2877 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko48e2f182010-02-22 09:22:26 +00002878 struct dev_mc_list *mc_ptr;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002879 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002880 int i;
2881
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002882 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002883 /* nothing to program, so clear mc list */
2884 igb_update_mc_addr_list(hw, NULL, 0);
2885 igb_restore_vf_multicasts(adapter);
2886 return 0;
2887 }
2888
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002889 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002890 if (!mta_list)
2891 return -ENOMEM;
2892
Alexander Duyck68d480c2009-10-05 06:33:08 +00002893 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00002894 i = 0;
2895 netdev_for_each_mc_addr(mc_ptr, netdev)
2896 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002897
Alexander Duyck68d480c2009-10-05 06:33:08 +00002898 igb_update_mc_addr_list(hw, mta_list, i);
2899 kfree(mta_list);
2900
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002901 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002902}
2903
2904/**
2905 * igb_write_uc_addr_list - write unicast addresses to RAR table
2906 * @netdev: network interface device structure
2907 *
2908 * Writes unicast address list to the RAR table.
2909 * Returns: -ENOMEM on failure/insufficient address space
2910 * 0 on no addresses written
2911 * X on writing X addresses to the RAR table
2912 **/
2913static int igb_write_uc_addr_list(struct net_device *netdev)
2914{
2915 struct igb_adapter *adapter = netdev_priv(netdev);
2916 struct e1000_hw *hw = &adapter->hw;
2917 unsigned int vfn = adapter->vfs_allocated_count;
2918 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2919 int count = 0;
2920
2921 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002922 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00002923 return -ENOMEM;
2924
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002925 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002926 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002927
2928 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002929 if (!rar_entries)
2930 break;
2931 igb_rar_set_qsel(adapter, ha->addr,
2932 rar_entries--,
2933 vfn);
2934 count++;
2935 }
2936 }
2937 /* write the addresses in reverse order to avoid write combining */
2938 for (; rar_entries > 0 ; rar_entries--) {
2939 wr32(E1000_RAH(rar_entries), 0);
2940 wr32(E1000_RAL(rar_entries), 0);
2941 }
2942 wrfl();
2943
2944 return count;
2945}
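/*
 * Editor's sketch of the RAR budget above, assuming a 24-entry RAR
 * table as on 82576: with vfs_allocated_count == 7,
 * rar_entries = 24 - (7 + 1) = 16, leaving entry 0 for the PF MAC
 * and one entry per VF held back. Sixteen secondary unicast
 * addresses fit; a seventeenth makes netdev_uc_count() exceed
 * rar_entries, the function returns -ENOMEM, and the caller falls
 * back to unicast promiscuous mode.
 */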
2946
2947/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002948 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08002949 * @netdev: network interface device structure
2950 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002951 * The set_rx_mode entry point is called whenever the unicast or multicast
2952 * address lists or the network interface flags are updated. This routine is
2953 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08002954 * promiscuous mode, and all-multi behavior.
2955 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002956static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002957{
2958 struct igb_adapter *adapter = netdev_priv(netdev);
2959 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002960 unsigned int vfn = adapter->vfs_allocated_count;
2961 u32 rctl, vmolr = 0;
2962 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08002963
2964 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08002965 rctl = rd32(E1000_RCTL);
2966
Alexander Duyck68d480c2009-10-05 06:33:08 +00002967 /* clear the affected bits */
2968 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2969
Patrick McHardy746b9f02008-07-16 20:15:45 -07002970 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002971 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002972 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002973 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002974 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002975 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002976 vmolr |= E1000_VMOLR_MPME;
2977 } else {
2978 /*
2979 * Write addresses to the MTA; if the attempt fails
2980 * then we should just turn on promiscuous mode so
2981 * that we can at least receive multicast traffic
2982 */
2983 count = igb_write_mc_addr_list(netdev);
2984 if (count < 0) {
2985 rctl |= E1000_RCTL_MPE;
2986 vmolr |= E1000_VMOLR_MPME;
2987 } else if (count) {
2988 vmolr |= E1000_VMOLR_ROMPE;
2989 }
2990 }
2991 /*
2992 * Write addresses to available RAR registers; if there is not
2993 * sufficient space to store all the addresses then enable
2994 * unicast promiscuous mode
2995 */
2996 count = igb_write_uc_addr_list(netdev);
2997 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002998 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002999 vmolr |= E1000_VMOLR_ROPE;
3000 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003001 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003002 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003003 wr32(E1000_RCTL, rctl);
3004
Alexander Duyck68d480c2009-10-05 06:33:08 +00003005 /*
3006 * In order to support SR-IOV and eventually VMDq it is necessary to set
3007 * the VMOLR to enable the appropriate modes. Without this workaround
3008 * we will have issues with VLAN tag stripping not being done for frames
3009 * that are only arriving because we are the default pool
3010 */
3011 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003012 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003013
Alexander Duyck68d480c2009-10-05 06:33:08 +00003014 vmolr |= rd32(E1000_VMOLR(vfn)) &
3015 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3016 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003017 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003018}
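/*
 * Editor's note on the VMOLR merge above (reached on 82576 and
 * later only): the read-modify-write preserves every bit of the
 * default pool's VMOLR except ROPE/MPME/ROMPE and then ORs back in
 * whichever of those three this pass computed, so the promiscuous
 * state tracks the netdev flags while unrelated pool settings such
 * as VLAN stripping survive untouched.
 */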
3019
3020/* Need to wait a few seconds after link up to get diagnostic information from
3021 * the phy */
3022static void igb_update_phy_info(unsigned long data)
3023{
3024 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003025 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003026}
3027
3028/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003029 * igb_has_link - check shared code for link and determine up/down
3030 * @adapter: pointer to driver private info
3031 **/
Nick Nunley31455352010-02-17 01:01:21 +00003032bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003033{
3034 struct e1000_hw *hw = &adapter->hw;
3035 bool link_active = false;
3036 s32 ret_val = 0;
3037
3038 /* get_link_status is set on LSC (link status) interrupt or
3039 * rx sequence error interrupt. get_link_status will stay
3040 * true until e1000_check_for_link establishes link
3041 * for copper adapters ONLY
3042 */
3043 switch (hw->phy.media_type) {
3044 case e1000_media_type_copper:
3045 if (hw->mac.get_link_status) {
3046 ret_val = hw->mac.ops.check_for_link(hw);
3047 link_active = !hw->mac.get_link_status;
3048 } else {
3049 link_active = true;
3050 }
3051 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003052 case e1000_media_type_internal_serdes:
3053 ret_val = hw->mac.ops.check_for_link(hw);
3054 link_active = hw->mac.serdes_has_link;
3055 break;
3056 default:
3057 case e1000_media_type_unknown:
3058 break;
3059 }
3060
3061 return link_active;
3062}
3063
3064/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003065 * igb_watchdog - Timer Call-back
3066 * @data: pointer to adapter cast into an unsigned long
3067 **/
3068static void igb_watchdog(unsigned long data)
3069{
3070 struct igb_adapter *adapter = (struct igb_adapter *)data;
3071 /* Do the rest outside of interrupt context */
3072 schedule_work(&adapter->watchdog_task);
3073}
3074
3075static void igb_watchdog_task(struct work_struct *work)
3076{
3077 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003078 struct igb_adapter,
3079 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003080 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003081 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003082 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003083 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003084
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003085 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003086 if (link) {
3087 if (!netif_carrier_ok(netdev)) {
3088 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003089 hw->mac.ops.get_speed_and_duplex(hw,
3090 &adapter->link_speed,
3091 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003092
3093 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003094 /* Link status message must follow this format */
3095 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003096 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003097 netdev->name,
3098 adapter->link_speed,
3099 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003100 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003101 ((ctrl & E1000_CTRL_TFCE) &&
3102 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3103 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3104 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003105
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003106 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003107 adapter->tx_timeout_factor = 1;
3108 switch (adapter->link_speed) {
3109 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003110 adapter->tx_timeout_factor = 14;
3111 break;
3112 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003113 /* maybe add some timeout factor ? */
3114 break;
3115 }
3116
3117 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003118
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003119 igb_ping_all_vfs(adapter);
3120
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003121 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003122 if (!test_bit(__IGB_DOWN, &adapter->state))
3123 mod_timer(&adapter->phy_info_timer,
3124 round_jiffies(jiffies + 2 * HZ));
3125 }
3126 } else {
3127 if (netif_carrier_ok(netdev)) {
3128 adapter->link_speed = 0;
3129 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003130 /* Link status message must follow this format */
3131 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3132 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003133 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003134
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003135 igb_ping_all_vfs(adapter);
3136
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003137 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003138 if (!test_bit(__IGB_DOWN, &adapter->state))
3139 mod_timer(&adapter->phy_info_timer,
3140 round_jiffies(jiffies + 2 * HZ));
3141 }
3142 }
3143
Auke Kok9d5c8242008-01-24 02:22:38 -08003144 igb_update_stats(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003145
Alexander Duyckdbabb062009-11-12 18:38:16 +00003146 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003147 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003148 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003149 /* We've lost link, so the controller stops DMA,
3150 * but we've got queued Tx work that's never going
3151 * to get done, so reset controller to flush Tx.
3152 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003153 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3154 adapter->tx_timeout_count++;
3155 schedule_work(&adapter->reset_task);
3156 /* return immediately since reset is imminent */
3157 return;
3158 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003159 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003160
Alexander Duyckdbabb062009-11-12 18:38:16 +00003161 /* Force detection of hung controller every watchdog period */
3162 tx_ring->detect_tx_hung = true;
3163 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003164
Auke Kok9d5c8242008-01-24 02:22:38 -08003165 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003166 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003167 u32 eics = 0;
3168 for (i = 0; i < adapter->num_q_vectors; i++) {
3169 struct igb_q_vector *q_vector = adapter->q_vector[i];
3170 eics |= q_vector->eims_value;
3171 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003172 wr32(E1000_EICS, eics);
3173 } else {
3174 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3175 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003176
Auke Kok9d5c8242008-01-24 02:22:38 -08003177 /* Reset the timer */
3178 if (!test_bit(__IGB_DOWN, &adapter->state))
3179 mod_timer(&adapter->watchdog_timer,
3180 round_jiffies(jiffies + 2 * HZ));
3181}
3182
3183enum latency_range {
3184 lowest_latency = 0,
3185 low_latency = 1,
3186 bulk_latency = 2,
3187 latency_invalid = 255
3188};
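/*
 * Editor's note: these ranges map onto the EITR values programmed
 * by igb_set_itr() below, assuming 0.25 usec per ITR tick:
 *
 *   lowest_latency -> 56 ticks (~14 usec) ~= 70,000 ints/sec
 *   low_latency -> 196 ticks (~49 usec) ~= 20,000 ints/sec
 *   bulk_latency -> 980 ticks (~245 usec) ~= 4,000 ints/sec
 */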
3189
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003190/**
3191 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3192 *
3193 * Stores a new ITR value based strictly on packet size. This
3194 * algorithm is less sophisticated than that used in igb_update_itr,
3195 * due to the difficulty of synchronizing statistics across multiple
3196 * receive rings. The divisors and thresholds used by this function
3197 * were determined based on theoretical maximum wire speed and testing
3198 * data, in order to minimize response time while increasing bulk
3199 * throughput.
3200 * This functionality is controlled by the InterruptThrottleRate module
3201 * parameter (see igb_param.c)
3202 * NOTE: This function is called only when operating in a multiqueue
3203 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003204 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003205 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003206static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003207{
Alexander Duyck047e0032009-10-27 15:49:27 +00003208 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003209 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003210 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -08003211
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003212 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3213 * ints/sec - ITR timer value of 976 (~244 usec in 0.25 usec units).
3214 */
3215 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003216 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003217 goto set_itr_val;
3218 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003219
3220 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3221 struct igb_ring *ring = q_vector->rx_ring;
3222 avg_wire_size = ring->total_bytes / ring->total_packets;
3223 }
3224
3225 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3226 struct igb_ring *ring = q_vector->tx_ring;
3227 avg_wire_size = max_t(u32, avg_wire_size,
3228 (ring->total_bytes /
3229 ring->total_packets));
3230 }
3231
3232 /* if avg_wire_size isn't set no work was done */
3233 if (!avg_wire_size)
3234 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003235
3236 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3237 avg_wire_size += 24;
3238
3239 /* Don't starve jumbo frames */
3240 avg_wire_size = min(avg_wire_size, 3000);
3241
3242 /* Give a little boost to mid-size frames */
3243 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3244 new_val = avg_wire_size / 3;
3245 else
3246 new_val = avg_wire_size / 2;
3247
Nick Nunleyabe1c362010-02-17 01:03:19 +00003248 /* when in itr mode 3 do not exceed 20K ints/sec */
3249 if (adapter->rx_itr_setting == 3 && new_val < 196)
3250 new_val = 196;
3251
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003252set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003253 if (new_val != q_vector->itr_val) {
3254 q_vector->itr_val = new_val;
3255 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003256 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003257clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003258 if (q_vector->rx_ring) {
3259 q_vector->rx_ring->total_bytes = 0;
3260 q_vector->rx_ring->total_packets = 0;
3261 }
3262 if (q_vector->tx_ring) {
3263 q_vector->tx_ring->total_bytes = 0;
3264 q_vector->tx_ring->total_packets = 0;
3265 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003266}
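/*
 * Worked example of the sizing heuristic above (editor's sketch):
 * at gigabit speed, suppose the rx ring moved 1,500,000 bytes in
 * 1,000 packets since the last update.
 *
 *   avg_wire_size = 1500000 / 1000 = 1500
 *   avg_wire_size += 24 -> 1524 (CRC, preamble, inter-frame gap)
 *   min(1524, 3000) -> 1524 (jumbo clamp)
 *   1524 is outside (300, 1200), so new_val = 1524 / 2 = 762
 *
 * 762 ticks of 0.25 usec is ~190 usec, roughly 5,200 ints/sec.
 */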
3267
3268/**
3269 * igb_update_itr - update the dynamic ITR value based on statistics
3270 * Stores a new ITR value based on packets and byte
3271 * counts during the last interrupt. The advantage of per interrupt
3272 * computation is faster updates and more accurate ITR for the current
3273 * traffic pattern. Constants in this function were computed
3274 * based on theoretical maximum wire speed and thresholds were set based
3275 * on testing data as well as attempting to minimize response time
3276 * while increasing bulk throughput.
3277 * this functionality is controlled by the InterruptThrottleRate module
3278 * parameter (see igb_param.c)
3279 * NOTE: These calculations are only valid when operating in a single-
3280 * queue environment.
3281 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003282 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003283 * @packets: the number of packets during this measurement interval
3284 * @bytes: the number of bytes during this measurement interval
3285 **/
3286static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3287 int packets, int bytes)
3288{
3289 unsigned int retval = itr_setting;
3290
3291 if (packets == 0)
3292 goto update_itr_done;
3293
3294 switch (itr_setting) {
3295 case lowest_latency:
3296 /* handle TSO and jumbo frames */
3297 if (bytes/packets > 8000)
3298 retval = bulk_latency;
3299 else if ((packets < 5) && (bytes > 512))
3300 retval = low_latency;
3301 break;
3302 case low_latency: /* 50 usec aka 20000 ints/s */
3303 if (bytes > 10000) {
3304 /* this if handles the TSO accounting */
3305 if (bytes/packets > 8000) {
3306 retval = bulk_latency;
3307 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3308 retval = bulk_latency;
3309 } else if ((packets > 35)) {
3310 retval = lowest_latency;
3311 }
3312 } else if (bytes/packets > 2000) {
3313 retval = bulk_latency;
3314 } else if (packets <= 2 && bytes < 512) {
3315 retval = lowest_latency;
3316 }
3317 break;
3318 case bulk_latency: /* 250 usec aka 4000 ints/s */
3319 if (bytes > 25000) {
3320 if (packets > 35)
3321 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003322 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003323 retval = low_latency;
3324 }
3325 break;
3326 }
3327
3328update_itr_done:
3329 return retval;
3330}
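/*
 * Worked example of the state machine above (editor's sketch): in
 * low_latency with 40 packets totalling 20,000 bytes, bytes > 10000
 * and bytes/packets == 500, so neither TSO branch fires, but
 * packets > 35 does and the function steps down to lowest_latency.
 * Two packets of 400 bytes total would instead hit the
 * (packets <= 2 && bytes < 512) case with the same result.
 */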
3331
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003332static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003333{
Alexander Duyck047e0032009-10-27 15:49:27 +00003334 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003335 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003336 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003337
3338 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3339 if (adapter->link_speed != SPEED_1000) {
3340 current_itr = 0;
3341 new_itr = 4000;
3342 goto set_itr_now;
3343 }
3344
3345 adapter->rx_itr = igb_update_itr(adapter,
3346 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003347 q_vector->rx_ring->total_packets,
3348 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003349
Alexander Duyck047e0032009-10-27 15:49:27 +00003350 adapter->tx_itr = igb_update_itr(adapter,
3351 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003352 q_vector->tx_ring->total_packets,
3353 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003354 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003355
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003356 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003357 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003358 current_itr = low_latency;
3359
Auke Kok9d5c8242008-01-24 02:22:38 -08003360 switch (current_itr) {
3361 /* counts and packets in update_itr are dependent on these numbers */
3362 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003363 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003364 break;
3365 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003366 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003367 break;
3368 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003369 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003370 break;
3371 default:
3372 break;
3373 }
3374
3375set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003376 q_vector->rx_ring->total_bytes = 0;
3377 q_vector->rx_ring->total_packets = 0;
3378 q_vector->tx_ring->total_bytes = 0;
3379 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003380
Alexander Duyck047e0032009-10-27 15:49:27 +00003381 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003382 /* this attempts to bias the interrupt rate towards Bulk
3383 * by adding intermediate steps when interrupt rate is
3384 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003385 new_itr = new_itr > q_vector->itr_val ?
3386 max((new_itr * q_vector->itr_val) /
3387 (new_itr + (q_vector->itr_val >> 2)),
3388 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003389 new_itr;
3390 /* Don't write the value here; it resets the adapter's
3391 * internal timer, and causes us to delay far longer than
3392 * we should between interrupts. Instead, we write the ITR
3393 * value at the beginning of the next interrupt so the timing
3394 * ends up being correct.
3395 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003396 q_vector->itr_val = new_itr;
3397 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003398 }
3401}
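/*
 * Editor's note on the blend above: igb_update_itr() scores rx and
 * tx separately and the slower class wins. If rx traffic rates
 * low_latency while tx rates bulk_latency, then
 * current_itr = max(low_latency, bulk_latency) = bulk_latency and
 * the shared vector is programmed for 980 ticks (~4,000 ints/sec).
 */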
3402
Auke Kok9d5c8242008-01-24 02:22:38 -08003403#define IGB_TX_FLAGS_CSUM 0x00000001
3404#define IGB_TX_FLAGS_VLAN 0x00000002
3405#define IGB_TX_FLAGS_TSO 0x00000004
3406#define IGB_TX_FLAGS_IPV4 0x00000008
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003407#define IGB_TX_FLAGS_TSTAMP 0x00000010
3408#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3409#define IGB_TX_FLAGS_VLAN_SHIFT 16
Auke Kok9d5c8242008-01-24 02:22:38 -08003410
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003411static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003412 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3413{
3414 struct e1000_adv_tx_context_desc *context_desc;
3415 unsigned int i;
3416 int err;
3417 struct igb_buffer *buffer_info;
3418 u32 info = 0, tu_cmd = 0;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003419 u32 mss_l4len_idx;
3420 u8 l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08003421
3422 if (skb_header_cloned(skb)) {
3423 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3424 if (err)
3425 return err;
3426 }
3427
3428 l4len = tcp_hdrlen(skb);
3429 *hdr_len += l4len;
3430
3431 if (skb->protocol == htons(ETH_P_IP)) {
3432 struct iphdr *iph = ip_hdr(skb);
3433 iph->tot_len = 0;
3434 iph->check = 0;
3435 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3436 iph->daddr, 0,
3437 IPPROTO_TCP,
3438 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003439 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003440 ipv6_hdr(skb)->payload_len = 0;
3441 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3442 &ipv6_hdr(skb)->daddr,
3443 0, IPPROTO_TCP, 0);
3444 }
3445
3446 i = tx_ring->next_to_use;
3447
3448 buffer_info = &tx_ring->buffer_info[i];
3449 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3450 /* VLAN MACLEN IPLEN */
3451 if (tx_flags & IGB_TX_FLAGS_VLAN)
3452 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3453 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3454 *hdr_len += skb_network_offset(skb);
3455 info |= skb_network_header_len(skb);
3456 *hdr_len += skb_network_header_len(skb);
3457 context_desc->vlan_macip_lens = cpu_to_le32(info);
3458
3459 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3460 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3461
3462 if (skb->protocol == htons(ETH_P_IP))
3463 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3464 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3465
3466 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3467
3468 /* MSS L4LEN IDX */
3469 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3470 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3471
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003472 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003473 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3474 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003475
3476 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3477 context_desc->seqnum_seed = 0;
3478
3479 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003480 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003481 buffer_info->dma = 0;
3482 i++;
3483 if (i == tx_ring->count)
3484 i = 0;
3485
3486 tx_ring->next_to_use = i;
3487
3488 return true;
3489}
3490
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003491static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3492 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003493{
3494 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck80785292009-10-27 15:51:47 +00003495 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003496 struct igb_buffer *buffer_info;
3497 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003498 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003499
3500 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3501 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3502 i = tx_ring->next_to_use;
3503 buffer_info = &tx_ring->buffer_info[i];
3504 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3505
3506 if (tx_flags & IGB_TX_FLAGS_VLAN)
3507 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003508
Auke Kok9d5c8242008-01-24 02:22:38 -08003509 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3510 if (skb->ip_summed == CHECKSUM_PARTIAL)
3511 info |= skb_network_header_len(skb);
3512
3513 context_desc->vlan_macip_lens = cpu_to_le32(info);
3514
3515 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3516
3517 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003518 __be16 protocol;
3519
3520 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3521 const struct vlan_ethhdr *vhdr =
3522 (const struct vlan_ethhdr*)skb->data;
3523
3524 protocol = vhdr->h_vlan_encapsulated_proto;
3525 } else {
3526 protocol = skb->protocol;
3527 }
3528
3529 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003530 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003531 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003532 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3533 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003534 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3535 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003536 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003537 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003538 /* XXX what about other V6 headers?? */
3539 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3540 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003541 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3542 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003543 break;
3544 default:
3545 if (unlikely(net_ratelimit()))
Alexander Duyck80785292009-10-27 15:51:47 +00003546 dev_warn(&pdev->dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003547 "partial checksum but proto=%x!\n",
3548 skb->protocol);
3549 break;
3550 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003551 }
3552
3553 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3554 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003555 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003556 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003557 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003558
3559 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003560 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003561 buffer_info->dma = 0;
3562
3563 i++;
3564 if (i == tx_ring->count)
3565 i = 0;
3566 tx_ring->next_to_use = i;
3567
3568 return true;
3569 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003570 return false;
3571}
3572
3573#define IGB_MAX_TXD_PWR 16
3574#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
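/* Editor's note: with IGB_MAX_TXD_PWR == 16, one descriptor carries at
 * most 1 << 16 = 65536 bytes, which is why the mapping code below
 * BUG_ONs on any single mapping of 64 KB or more. */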
3575
Alexander Duyck80785292009-10-27 15:51:47 +00003576static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003577 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003578{
3579 struct igb_buffer *buffer_info;
Alexander Duyck80785292009-10-27 15:51:47 +00003580 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003581 unsigned int len = skb_headlen(skb);
3582 unsigned int count = 0, i;
3583 unsigned int f;
3584
3585 i = tx_ring->next_to_use;
3586
3587 buffer_info = &tx_ring->buffer_info[i];
3588 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3589 buffer_info->length = len;
3590 /* set time_stamp *before* dma to help avoid a possible race */
3591 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003592 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003593 buffer_info->dma = pci_map_single(pdev, skb->data, len,
3594 PCI_DMA_TODEVICE);
3595 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3596 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08003597
3598 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3599 struct skb_frag_struct *frag;
3600
Alexander Duyck85811452010-01-23 01:35:00 -08003601 count++;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003602 i++;
3603 if (i == tx_ring->count)
3604 i = 0;
3605
Auke Kok9d5c8242008-01-24 02:22:38 -08003606 frag = &skb_shinfo(skb)->frags[f];
3607 len = frag->size;
3608
3609 buffer_info = &tx_ring->buffer_info[i];
3610 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3611 buffer_info->length = len;
3612 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003613 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003614 buffer_info->mapped_as_page = true;
3615 buffer_info->dma = pci_map_page(pdev,
3616 frag->page,
3617 frag->page_offset,
3618 len,
3619 PCI_DMA_TODEVICE);
3620 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3621 goto dma_error;
3622
Auke Kok9d5c8242008-01-24 02:22:38 -08003623 }
3624
Auke Kok9d5c8242008-01-24 02:22:38 -08003625 tx_ring->buffer_info[i].skb = skb;
Nick Nunley40e90c22010-02-17 01:04:37 +00003626 tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003627 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003628
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003629 return ++count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003630
3631dma_error:
3632 dev_err(&pdev->dev, "TX DMA map failed\n");
3633
3634 /* clear timestamp and dma mappings for failed buffer_info mapping */
3635 buffer_info->dma = 0;
3636 buffer_info->time_stamp = 0;
3637 buffer_info->length = 0;
3638 buffer_info->next_to_watch = 0;
3639 buffer_info->mapped_as_page = false;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003640
3641 /* clear timestamp and dma mappings for remaining portion of packet */
Nick Nunleya77ff702010-02-17 01:06:16 +00003642 while (count--) {
3643 if (i == 0)
3644 i = tx_ring->count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003645 i--;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003646 buffer_info = &tx_ring->buffer_info[i];
3647 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3648 }
3649
3650 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003651}
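/*
 * Editor's sketch of the bookkeeping above: an skb with a linear
 * area plus two page fragments consumes descriptors i, i+1 and i+2
 * and returns count == 3 (the final ++count accounts for the head
 * mapping). If the second fragment's mapping fails, count == 2 at
 * the dma_error label, so the rewind loop walks i backwards exactly
 * twice -- unmapping the first fragment and the head -- and returns
 * 0 so the caller can reset next_to_use to 'first'.
 */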
3652
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003653static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Nick Nunley91d4ee32010-02-17 01:04:56 +00003654 u32 tx_flags, int count, u32 paylen,
Auke Kok9d5c8242008-01-24 02:22:38 -08003655 u8 hdr_len)
3656{
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003657 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003658 struct igb_buffer *buffer_info;
3659 u32 olinfo_status = 0, cmd_type_len;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003660 unsigned int i = tx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08003661
3662 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3663 E1000_ADVTXD_DCMD_DEXT);
3664
3665 if (tx_flags & IGB_TX_FLAGS_VLAN)
3666 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3667
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003668 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3669 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3670
Auke Kok9d5c8242008-01-24 02:22:38 -08003671 if (tx_flags & IGB_TX_FLAGS_TSO) {
3672 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3673
3674 /* insert tcp checksum */
3675 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3676
3677 /* insert ip checksum */
3678 if (tx_flags & IGB_TX_FLAGS_IPV4)
3679 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3680
3681 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3682 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3683 }
3684
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003685 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3686 (tx_flags & (IGB_TX_FLAGS_CSUM |
3687 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003688 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003689 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003690
3691 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3692
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003693 do {
Auke Kok9d5c8242008-01-24 02:22:38 -08003694 buffer_info = &tx_ring->buffer_info[i];
3695 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3696 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3697 tx_desc->read.cmd_type_len =
3698 cpu_to_le32(cmd_type_len | buffer_info->length);
3699 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003700 count--;
Auke Kok9d5c8242008-01-24 02:22:38 -08003701 i++;
3702 if (i == tx_ring->count)
3703 i = 0;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003704 } while (count > 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003705
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003706 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08003707 /* Force memory writes to complete before letting h/w
3708 * know there are new descriptors to fetch. (Only
3709 * applicable for weak-ordered memory model archs,
3710 * such as IA-64). */
3711 wmb();
3712
3713 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00003714 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08003715 /* we need this if more than one processor can write to our tail
3716 * at a time; it synchronizes IO on IA64/Altix systems */
3717 mmiowb();
3718}
3719
Alexander Duycke694e962009-10-27 15:53:06 +00003720static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003721{
Alexander Duycke694e962009-10-27 15:53:06 +00003722 struct net_device *netdev = tx_ring->netdev;
3723
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003724 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003725
Auke Kok9d5c8242008-01-24 02:22:38 -08003726 /* Herbert's original patch had:
3727 * smp_mb__after_netif_stop_queue();
3728 * but since that doesn't exist yet, just open code it. */
3729 smp_mb();
3730
3731 /* We need to check again in a case another CPU has just
3732 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00003733 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003734 return -EBUSY;
3735
3736 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003737 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00003738 tx_ring->tx_stats.restart_queue++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003739 return 0;
3740}
3741
Nick Nunley717ba082010-02-17 01:04:18 +00003742static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003743{
Alexander Duyckc493ea42009-03-20 00:16:50 +00003744 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003745 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00003746 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003747}
3748
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003749netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3750 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003751{
Alexander Duycke694e962009-10-27 15:53:06 +00003752 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003753 int tso = 0, count;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003754 u32 tx_flags = 0;
3755 u16 first;
3756 u8 hdr_len = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00003757 union skb_shared_tx *shtx = skb_tx(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08003758
Auke Kok9d5c8242008-01-24 02:22:38 -08003759 /* need: 1 descriptor per page,
3760 * + 2 desc gap to keep tail from touching head,
3761 * + 1 desc for skb->data,
3762 * + 1 desc for context descriptor,
3763 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00003764 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003765 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08003766 return NETDEV_TX_BUSY;
3767 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003768
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003769 if (unlikely(shtx->hardware)) {
3770 shtx->in_progress = 1;
3771 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003772 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003773
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003774 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003775 tx_flags |= IGB_TX_FLAGS_VLAN;
3776 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3777 }
3778
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003779 if (skb->protocol == htons(ETH_P_IP))
3780 tx_flags |= IGB_TX_FLAGS_IPV4;
3781
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003782 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003783 if (skb_is_gso(skb)) {
3784 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003785
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003786 if (tso < 0) {
3787 dev_kfree_skb_any(skb);
3788 return NETDEV_TX_OK;
3789 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003790 }
3791
3792 if (tso)
3793 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003794 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00003795 (skb->ip_summed == CHECKSUM_PARTIAL))
3796 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003797
Alexander Duyck65689fe2009-03-20 00:17:43 +00003798 /*
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003799 * count reflects descriptors mapped; if 0 or less then a mapping error
Alexander Duyck65689fe2009-03-20 00:17:43 +00003800 * has occurred and we need to rewind the descriptor queue
3801 */
Alexander Duyck80785292009-10-27 15:51:47 +00003802 count = igb_tx_map_adv(tx_ring, skb, first);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003803 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00003804 dev_kfree_skb_any(skb);
3805 tx_ring->buffer_info[first].time_stamp = 0;
3806 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003807 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003808 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003809
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003810 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3811
3812 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00003813 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003814
Auke Kok9d5c8242008-01-24 02:22:38 -08003815 return NETDEV_TX_OK;
3816}
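/*
 * Editor's sketch of the descriptor budget above: a TSO skb with
 * three page fragments asks igb_maybe_stop_tx() for nr_frags + 4 ==
 * 7 free descriptors -- one per fragment, one for skb->data, one
 * context descriptor, plus the two-descriptor gap that keeps tail
 * from touching head. The post-send check reserves MAX_SKB_FRAGS + 4
 * so the next worst-case frame can be queued without stopping the
 * queue in the hot path.
 */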
3817
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003818static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3819 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003820{
3821 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003822 struct igb_ring *tx_ring;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003823 int r_idx = 0;
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003824
3825 if (test_bit(__IGB_DOWN, &adapter->state)) {
3826 dev_kfree_skb_any(skb);
3827 return NETDEV_TX_OK;
3828 }
3829
3830 if (skb->len <= 0) {
3831 dev_kfree_skb_any(skb);
3832 return NETDEV_TX_OK;
3833 }
3834
Alexander Duyck1bfaf072009-02-19 20:39:23 -08003835 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003836 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08003837
3838 /* This goes back to the question of how to logically map a tx queue
3839 * to a flow. Right now, performance is impacted slightly negatively
3840 * if using multiple tx queues. If the stack breaks away from a
3841 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00003842 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003843}
3844
3845/**
3846 * igb_tx_timeout - Respond to a Tx Hang
3847 * @netdev: network interface device structure
3848 **/
3849static void igb_tx_timeout(struct net_device *netdev)
3850{
3851 struct igb_adapter *adapter = netdev_priv(netdev);
3852 struct e1000_hw *hw = &adapter->hw;
3853
3854 /* Do the reset outside of interrupt context */
3855 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003856
Alexander Duyck55cac242009-11-19 12:42:21 +00003857 if (hw->mac.type == e1000_82580)
3858 hw->dev_spec._82575.global_device_reset = true;
3859
Auke Kok9d5c8242008-01-24 02:22:38 -08003860 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00003861 wr32(E1000_EICS,
3862 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08003863}
3864
3865static void igb_reset_task(struct work_struct *work)
3866{
3867 struct igb_adapter *adapter;
3868 adapter = container_of(work, struct igb_adapter, reset_task);
3869
3870 igb_reinit_locked(adapter);
3871}
3872
3873/**
3874 * igb_get_stats - Get System Network Statistics
3875 * @netdev: network interface device structure
3876 *
3877 * Returns the address of the device statistics structure.
3878 * The statistics are actually updated from the timer callback.
3879 **/
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003880static struct net_device_stats *igb_get_stats(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003881{
Auke Kok9d5c8242008-01-24 02:22:38 -08003882 /* only return the current stats */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003883 return &netdev->stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08003884}
3885
3886/**
3887 * igb_change_mtu - Change the Maximum Transfer Unit
3888 * @netdev: network interface device structure
3889 * @new_mtu: new value for maximum frame size
3890 *
3891 * Returns 0 on success, negative on failure
3892 **/
3893static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3894{
3895 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00003896 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003897 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00003898 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003899
Alexander Duyckc809d222009-10-27 23:52:13 +00003900 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003901 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003902 return -EINVAL;
3903 }
3904
Auke Kok9d5c8242008-01-24 02:22:38 -08003905 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003906 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003907 return -EINVAL;
3908 }
3909
3910 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3911 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003912
Auke Kok9d5c8242008-01-24 02:22:38 -08003913 /* igb_down has a dependency on max_frame_size */
3914 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00003915
Auke Kok9d5c8242008-01-24 02:22:38 -08003916 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3917 * means we reserve 2 more; this pushes us to allocate from the next
3918 * larger slab size.
3919 * e.g. RXBUFFER_2048 --> size-4096 slab
3920 */
3921
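	/* pick the smallest buffer that still holds a whole frame; past the
	 * VLAN-sized frame we drop to the 128 byte header buffer, since
	 * jumbo frames are header-split and the payload lands in pages */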
	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

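	/* fold the per-ring software totals into the netdev stats; RQDPC
	 * counts packets dropped because a queue had no free descriptors */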
	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];
		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;
		bytes += ring->rx_stats.bytes;
		packets += ring->rx_stats.packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		bytes += ring->tx_stats.bytes;
		packets += ring->tx_stats.packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCH */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCH */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

4055 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004056 /* read internal phy specific stats */
4057 reg = rd32(E1000_CTRL_EXT);
4058 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4059 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4060 adapter->stats.tncrs += rd32(E1000_TNCRS);
4061 }
4062
Auke Kok9d5c8242008-01-24 02:22:38 -08004063 adapter->stats.tsctc += rd32(E1000_TSCTC);
4064 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4065
4066 adapter->stats.iac += rd32(E1000_IAC);
4067 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4068 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4069 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4070 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4071 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4072 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4073 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4074 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4075
4076 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004077 net_stats->multicast = adapter->stats.mprc;
4078 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004079
4080 /* Rx Errors */
4081
4082 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004083 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004084 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004085 adapter->stats.crcerrs + adapter->stats.algnerrc +
4086 adapter->stats.ruc + adapter->stats.roc +
4087 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004088 net_stats->rx_length_errors = adapter->stats.ruc +
4089 adapter->stats.roc;
4090 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4091 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4092 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004093
4094 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004095 net_stats->tx_errors = adapter->stats.ecol +
4096 adapter->stats.latecol;
4097 net_stats->tx_aborted_errors = adapter->stats.ecol;
4098 net_stats->tx_window_errors = adapter->stats.latecol;
4099 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004100
4101 /* Tx Dropped needs to be maintained elsewhere */
4102
4103 /* Phy Stats */
4104 if (hw->phy.media_type == e1000_media_type_copper) {
4105 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004106 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004107 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4108 adapter->phy_stats.idle_errors += phy_tmp;
4109 }
4110 }
4111
4112 /* Management Stats */
4113 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4114 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4115 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4116}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

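	/* 82575 expects the interval mirrored into the high half of EITR;
	 * later MACs instead pair the interval with a control bit in the
	 * upper bits (presumably so pending counters are not disturbed) */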
	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

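	/* retag this vector's rings so DCA steers descriptor writebacks
	 * into the cache of the CPU we are currently running on */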
	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

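	/* nudge every VF mailbox; advertise clear-to-send only for VFs
	 * that have completed their reset handshake */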
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

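			/* first VLAN for this VF: grow its VMOLR frame size
			 * limit by 4 bytes to make room for the tag */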
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags */
	adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS status and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		if (adapter->vf_data[vf].pf_vlan)
			retval = -1;
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used,
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

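	/* Tx cleanup is not budgeted: if it did not finish, claim the full
	 * budget so NAPI keeps polling instead of re-enabling interrupts */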
	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up by
	 * 24 bits to match the clock shift we set up earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

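	/* each buffer records the index of the descriptor that ends its
	 * packet (next_to_watch); only reclaim up to an EOP descriptor
	 * that hardware has marked done (DD) */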
	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = buffer_info->gso_segs;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	return (count < tx_ring->count);
}

/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
			    struct sk_buff *skb,
			    u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag && adapter->vlgrp)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (so let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}

static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
				   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}


static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
			       union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
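
/*
 * Worked example (illustrative, register values assumed): with
 * E1000_RXDADV_HDRBUFLEN_MASK == 0x7FE0 and
 * E1000_RXDADV_HDRBUFLEN_SHIFT == 5, a hdr_info word of 0x1280 gives
 * (0x1280 & 0x7FE0) >> 5 == 148 bytes of header DMA'd into the header
 * buffer; the remainder of the packet, if any, lands in the half-page
 * fragment.
 */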

static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
				 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}
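
/*
 * Note (illustrative summary, not from the original sources): each RX
 * descriptor is backed by two DMA mappings - a small header buffer
 * (buffer_info->dma) and half of a page (buffer_info->page_dma).  The
 * loop above pulls the header into skb->data, attaches the half page as
 * a paged fragment, and keeps chaining buffers until a descriptor with
 * EOP set completes the frame.
 */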

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: address of the ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = pci_map_single(rx_ring->pdev,
							  skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->dma)) {
				buffer_info->dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
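
/*
 * Note (illustrative, not from the original sources): receive pages are
 * consumed half a page at a time - buffer_info->page_offset toggles
 * between 0 and PAGE_SIZE/2, so one allocated page can back two
 * descriptors, and the get_page() in the clean routine keeps a
 * still-local page alive for reuse here.
 */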

/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure containing the MII data
 * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
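
/*
 * Illustrative usage sketch (assumption, not part of the driver): a
 * userspace tool such as mii-tool reads the PHY's BMSR roughly like
 * this, given an AF_INET socket fd:
 *
 *	struct ifreq ifr = { };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	  [fills mii->phy_id]
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	  [link status in mii->val_out]
 */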

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying a hwtstamp_config
 * @cmd: ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * layer 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
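
/*
 * Illustrative usage sketch (assumption, not part of the driver): a
 * userspace PTP daemon would enable timestamping roughly like this,
 * given a socket fd and <linux/net_tstamp.h>:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { };
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter holds the filter actually programmed, which
 * may be broader than requested (e.g. HWTSTAMP_FILTER_ALL above).
 */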

/**
 * igb_ioctl - dispatch device ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
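
/*
 * Illustrative note (identifiers assumed): these helpers let the shared
 * e1000 core read and write registers in the PCIe capability block
 * through the PCI layer, e.g. fetching the negotiated link status:
 *
 *	u16 link;
 *	igb_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, &link);
 */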

static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
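
/*
 * Note (illustrative, not from the original sources): interrupts are
 * masked across the vlgrp update above so the RX clean path never sees
 * a half-installed group, while CTRL.VME switches hardware VLAN tag
 * stripping and insertion on or off to match.
 */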

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
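
/*
 * Illustrative example (not from the original sources): the ethtool
 * set-settings path folds speed and duplex into the single spddplx
 * argument, so forcing 100 Mb/s full duplex is
 *
 *	igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);
 *
 * which programs ADVERTISE_100_FULL with autonegotiation disabled;
 * only gigabit keeps autoneg on, since 1000BASE-T requires it.
 */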

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
6048
Alexander Duyck26ad9172009-10-05 06:32:49 +00006049static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6050 u8 qsel)
6051{
6052 u32 rar_low, rar_high;
6053 struct e1000_hw *hw = &adapter->hw;
6054
6055 /* HW expects these in little endian so we reverse the byte order
6056 * from network order (big endian) to little endian
6057 */
6058 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6059 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6060 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6061
6062 /* Indicate to hardware the Address is Valid. */
6063 rar_high |= E1000_RAH_AV;
6064
6065 if (hw->mac.type == e1000_82575)
6066 rar_high |= E1000_RAH_POOL_1 * qsel;
6067 else
6068 rar_high |= E1000_RAH_POOL_1 << qsel;
6069
6070 wr32(E1000_RAL(index), rar_low);
6071 wrfl();
6072 wr32(E1000_RAH(index), rar_high);
6073 wrfl();
6074}
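
/*
 * Worked example (illustrative, address assumed): for the MAC address
 * 00:1b:21:aa:bb:cc the packing above yields
 *
 *	rar_low  = 0xaa211b00
 *	rar_high = 0x0000ccbb | E1000_RAH_AV = 0x8000ccbb
 *
 * before the pool select bits for qsel are merged in.
 */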

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first; as a result a collision should not be
	 * possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	return -EOPNOTSUPP;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */