/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = igb_suspend,
	.resume = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on the lowest register read.  For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However, we never
	 * adjusted TIMINCA, so SYSTIMR will just read as all 0s; ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
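
/*
 * Illustrative note (added, not in the original source): the 64-bit value
 * returned above is SYSTIMH:SYSTIML shifted up by 'shift' bits, with the
 * (always-zero, see comment above) SYSTIMR >> 8 value occupying the low
 * bits on the 82580; on 82575/82576, shift is 0 and the result is simply
 * SYSTIMH:SYSTIML.
 */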

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
	        "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
	        hw,
	        (long)nic.tv_sec, nic.tv_nsec,
	        (long)sys.tv_sec, sys.tv_nsec,
	        (long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
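/*
 * Illustrative note (added): Q_IDX_82576() interleaves the low and high
 * halves of the 82576 queue space:
 *   i:              0  1  2  3  4   5 ...
 *   Q_IDX_82576(i): 0  8  1  9  2  10 ...
 * matching the VF layout described in igb_cache_ring_register() below
 * (VF 0 is allocated queues 0 and 8, VF 1 queues 1 and 9, and so on).
 */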
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
			for (; j < adapter->rss_queues; j++)
				adapter->tx_ring[j]->reg_idx = rbase_offset +
				                               Q_IDX_82576(j);
		}
	case e1000_82575:
	case e1000_82580:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
		/* 82580 uses the same table-based approach as the 82576, but
		   has fewer entries; as a result we carry over for queues
		   greater than 4. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
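
/*
 * Illustrative note (added, not in the original source): on the 82576 each
 * IVAR0 register packs four 8-bit entries, so e.g. rx queue 3 lands in the
 * low byte of IVAR0[3] while rx queue 11 lands in the third byte of that
 * same register; tx queues use the second and high bytes.  On the 82580 two
 * queues share one entry (index = queue >> 1), with even queues in the
 * lower half of the register and odd queues in the upper half.
 */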

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
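
/*
 * Illustrative note (added): with rss_queues = 4 and IGB_FLAG_QUEUE_PAIRS
 * clear, the code above requests 4 rx + 4 tx + 1 link-status = 9 MSI-X
 * vectors; with queue pairs enabled each tx ring shares its rx ring's
 * vector, so only 4 + 1 = 5 vectors are requested.
 */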

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
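
/*
 * Illustrative note (added): with 4 rx and 4 tx queues, 8 q_vectors gives
 * every ring its own vector, while 4 q_vectors pairs tx ring i with rx
 * ring i on vector i (the queue-pairs case handled in the else branch).
 */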

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}


	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
1282		/* upper 16 bits hold the Tx packet buffer allocation size in KB */
1283		tx_space = pba >> 16;
1284		/* lower 16 bits hold the Rx packet buffer allocation size in KB */
1285 pba &= 0xffff;
1286		/* the Tx FIFO also stores 16 bytes of information about each Tx
1287		 * packet; don't count the Ethernet FCS because hardware appends it */
1288 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001289 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001290 ETH_FCS_LEN) * 2;
1291 min_tx_space = ALIGN(min_tx_space, 1024);
1292 min_tx_space >>= 10;
1293 /* software strips receive CRC, so leave room for it */
1294 min_rx_space = adapter->max_frame_size;
1295 min_rx_space = ALIGN(min_rx_space, 1024);
1296 min_rx_space >>= 10;
1297
1298 /* If current Tx allocation is less than the min Tx FIFO size,
1299 * and the min Tx FIFO size is less than the current Rx FIFO
1300 * allocation, take space away from current Rx allocation */
1301 if (tx_space < min_tx_space &&
1302 ((min_tx_space - tx_space) < pba)) {
1303 pba = pba - (min_tx_space - tx_space);
1304
1305 /* if short on rx space, rx wins and must trump tx
1306 * adjustment */
1307 if (pba < min_rx_space)
1308 pba = min_rx_space;
1309 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001310 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001311 }
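	/*
	 * Worked example (illustrative, assuming a 9018-byte max frame,
	 * i.e. 9000 MTU + 14-byte header + 4-byte FCS):
	 *   min_tx_space = (9018 + 16 - 4) * 2 = 18060
	 *                  -> ALIGN(18060, 1024) >> 10 = 18 KB
	 *   min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB
	 * So if the hardware reported only 16 KB of Tx space, 2 KB would be
	 * moved from the Rx allocation to Tx, but never so much that the Rx
	 * side drops below min_rx_space.
	 */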
Auke Kok9d5c8242008-01-24 02:22:38 -08001312
1313 /* flow control settings */
1314 /* The high water mark must be low enough to fit one full frame
1315 * (or the size used for early receive) above it in the Rx FIFO.
1316 * Set it to the lower of:
1317 * - 90% of the Rx FIFO size, or
1318	 * - the full Rx FIFO size minus two full frames */
1319 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001320 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001321
Alexander Duyckd405ea32009-12-23 13:21:27 +00001322 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1323 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001324 fc->pause_time = 0xFFFF;
1325 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001326 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001327
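	/*
	 * Worked example (illustrative): with pba = 34 KB and a standard
	 * 1522-byte max frame,
	 *   hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
	 *       = min(31334, 31772) = 31334
	 * which the masking above trims to 16-byte granularity:
	 *   fc->high_water = 31334 & 0xFFF0 = 31328
	 *   fc->low_water  = 31328 - 16     = 31312
	 */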
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001328	/* disable receives and transmits for all VFs */
1329 if (adapter->vfs_allocated_count) {
1330 int i;
1331 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001332 adapter->vf_data[i].flags = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001333
1334 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001335 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001336
1337 /* disable transmits and receives */
1338 wr32(E1000_VFRE, 0);
1339 wr32(E1000_VFTE, 0);
1340 }
1341
Auke Kok9d5c8242008-01-24 02:22:38 -08001342 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001343 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001344 wr32(E1000_WUC, 0);
1345
Alexander Duyck330a6d62009-10-27 23:51:35 +00001346 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001347 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001348
Alexander Duyck55cac242009-11-19 12:42:21 +00001349 if (hw->mac.type == e1000_82580) {
1350 u32 reg = rd32(E1000_PCIEMISC);
1351 wr32(E1000_PCIEMISC,
1352 reg & ~E1000_PCIEMISC_LX_DECISION);
1353 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001354 if (!netif_running(adapter->netdev))
1355 igb_power_down_link(adapter);
1356
Auke Kok9d5c8242008-01-24 02:22:38 -08001357 igb_update_mng_vlan(adapter);
1358
1359 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1360 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1361
Alexander Duyck330a6d62009-10-27 23:51:35 +00001362 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001363}
1364
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001365static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001366 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001367 .ndo_stop = igb_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08001368 .ndo_start_xmit = igb_xmit_frame_adv,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001369 .ndo_get_stats = igb_get_stats,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001370 .ndo_set_rx_mode = igb_set_rx_mode,
1371 .ndo_set_multicast_list = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001372 .ndo_set_mac_address = igb_set_mac,
1373 .ndo_change_mtu = igb_change_mtu,
1374 .ndo_do_ioctl = igb_ioctl,
1375 .ndo_tx_timeout = igb_tx_timeout,
1376 .ndo_validate_addr = eth_validate_addr,
1377 .ndo_vlan_rx_register = igb_vlan_rx_register,
1378 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1379 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001380 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1381 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1382 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1383 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001384#ifdef CONFIG_NET_POLL_CONTROLLER
1385 .ndo_poll_controller = igb_netpoll,
1386#endif
1387};
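#if 0	/* illustrative sketch, not built: the network stack reaches this
	 * driver only through the ops table above. A hypothetical helper
	 * forcing an MTU change would dispatch through ndo_change_mtu rather
	 * than call igb_change_mtu() directly. */
static int example_force_mtu(struct net_device *netdev, int new_mtu)
{
	const struct net_device_ops *ops = netdev->netdev_ops;

	if (!ops->ndo_change_mtu)
		return -EOPNOTSUPP;
	return ops->ndo_change_mtu(netdev, new_mtu);
}
#endif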
1388
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001389/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001390 * igb_probe - Device Initialization Routine
1391 * @pdev: PCI device information struct
1392 * @ent: entry in igb_pci_tbl
1393 *
1394 * Returns 0 on success, negative on failure
1395 *
1396 * igb_probe initializes an adapter identified by a pci_dev structure.
1397 * The OS initialization, configuring of the adapter private structure,
1398 * and a hardware reset occur.
1399 **/
1400static int __devinit igb_probe(struct pci_dev *pdev,
1401 const struct pci_device_id *ent)
1402{
1403 struct net_device *netdev;
1404 struct igb_adapter *adapter;
1405 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001406 u16 eeprom_data = 0;
1407 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001408 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1409 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001410 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001411 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1412 u32 part_num;
1413
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001414 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001415 if (err)
1416 return err;
1417
1418 pci_using_dac = 0;
Yang Hongyang6a355282009-04-06 19:01:13 -07001419 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001420 if (!err) {
Yang Hongyang6a355282009-04-06 19:01:13 -07001421 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001422 if (!err)
1423 pci_using_dac = 1;
1424 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07001425 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001426 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07001427 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001428 if (err) {
1429 dev_err(&pdev->dev, "No usable DMA "
1430 "configuration, aborting\n");
1431 goto err_dma;
1432 }
1433 }
1434 }
1435
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001436 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1437 IORESOURCE_MEM),
1438 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001439 if (err)
1440 goto err_pci_reg;
1441
Frans Pop19d5afd2009-10-02 10:04:12 -07001442 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001443
Auke Kok9d5c8242008-01-24 02:22:38 -08001444 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001445 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001446
1447 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001448 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1449 IGB_ABS_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001450 if (!netdev)
1451 goto err_alloc_etherdev;
1452
1453 SET_NETDEV_DEV(netdev, &pdev->dev);
1454
1455 pci_set_drvdata(pdev, netdev);
1456 adapter = netdev_priv(netdev);
1457 adapter->netdev = netdev;
1458 adapter->pdev = pdev;
1459 hw = &adapter->hw;
1460 hw->back = adapter;
1461 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1462
1463 mmio_start = pci_resource_start(pdev, 0);
1464 mmio_len = pci_resource_len(pdev, 0);
1465
1466 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001467 hw->hw_addr = ioremap(mmio_start, mmio_len);
1468 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001469 goto err_ioremap;
1470
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001471 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001472 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001473 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001474
1475 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1476
1477 netdev->mem_start = mmio_start;
1478 netdev->mem_end = mmio_start + mmio_len;
1479
Auke Kok9d5c8242008-01-24 02:22:38 -08001480 /* PCI config space info */
1481 hw->vendor_id = pdev->vendor;
1482 hw->device_id = pdev->device;
1483 hw->revision_id = pdev->revision;
1484 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1485 hw->subsystem_device_id = pdev->subsystem_device;
1486
Auke Kok9d5c8242008-01-24 02:22:38 -08001487 /* Copy the default MAC, PHY and NVM function pointers */
1488 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1489 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1490 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1491 /* Initialize skew-specific constants */
1492 err = ei->get_invariants(hw);
1493 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001494 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001495
Alexander Duyck450c87c2009-02-06 23:22:11 +00001496 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001497 err = igb_sw_init(adapter);
1498 if (err)
1499 goto err_sw_init;
1500
1501 igb_get_bus_info_pcie(hw);
1502
1503 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001504
1505 /* Copper options */
1506 if (hw->phy.media_type == e1000_media_type_copper) {
1507 hw->phy.mdix = AUTO_ALL_MODES;
1508 hw->phy.disable_polarity_correction = false;
1509 hw->phy.ms_type = e1000_ms_hw_default;
1510 }
1511
1512 if (igb_check_reset_block(hw))
1513 dev_info(&pdev->dev,
1514 "PHY reset is blocked due to SOL/IDER session.\n");
1515
1516 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001517 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001518 NETIF_F_HW_VLAN_TX |
1519 NETIF_F_HW_VLAN_RX |
1520 NETIF_F_HW_VLAN_FILTER;
1521
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001522 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001523 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001524 netdev->features |= NETIF_F_TSO6;
Herbert Xu5c0999b2009-01-19 15:20:57 -08001525 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001526
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001527 netdev->vlan_features |= NETIF_F_TSO;
1528 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001529 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001530 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001531 netdev->vlan_features |= NETIF_F_SG;
1532
Auke Kok9d5c8242008-01-24 02:22:38 -08001533 if (pci_using_dac)
1534 netdev->features |= NETIF_F_HIGHDMA;
1535
Alexander Duyck5b043fb2009-10-27 23:52:31 +00001536 if (hw->mac.type >= e1000_82576)
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001537 netdev->features |= NETIF_F_SCTP_CSUM;
1538
Alexander Duyck330a6d62009-10-27 23:51:35 +00001539 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001540
1541 /* before reading the NVM, reset the controller to put the device in a
1542 * known good starting state */
1543 hw->mac.ops.reset_hw(hw);
1544
1545 /* make sure the NVM is good */
1546 if (igb_validate_nvm_checksum(hw) < 0) {
1547 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1548 err = -EIO;
1549 goto err_eeprom;
1550 }
1551
1552 /* copy the MAC address out of the NVM */
1553 if (hw->mac.ops.read_mac_addr(hw))
1554 dev_err(&pdev->dev, "NVM Read Error\n");
1555
1556 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1557 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1558
1559 if (!is_valid_ether_addr(netdev->perm_addr)) {
1560 dev_err(&pdev->dev, "Invalid MAC Address\n");
1561 err = -EIO;
1562 goto err_eeprom;
1563 }
1564
Alexander Duyck0e340482009-03-20 00:17:08 +00001565 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1566 (unsigned long) adapter);
1567 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1568 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001569
1570 INIT_WORK(&adapter->reset_task, igb_reset_task);
1571 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1572
Alexander Duyck450c87c2009-02-06 23:22:11 +00001573 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001574 adapter->fc_autoneg = true;
1575 hw->mac.autoneg = true;
1576 hw->phy.autoneg_advertised = 0x2f;
1577
Alexander Duyck0cce1192009-07-23 18:10:24 +00001578 hw->fc.requested_mode = e1000_fc_default;
1579 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001580
Auke Kok9d5c8242008-01-24 02:22:38 -08001581 igb_validate_mdi_setting(hw);
1582
Auke Kok9d5c8242008-01-24 02:22:38 -08001583	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
1584 * enable the ACPI Magic Packet filter
1585 */
1586
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001587 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001588 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duyck55cac242009-11-19 12:42:21 +00001589 else if (hw->mac.type == e1000_82580)
1590 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1591 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1592 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001593 else if (hw->bus.func == 1)
1594 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001595
1596 if (eeprom_data & eeprom_apme_mask)
1597 adapter->eeprom_wol |= E1000_WUFC_MAG;
1598
1599 /* now that we have the eeprom settings, apply the special cases where
1600 * the eeprom may be wrong or the board simply won't support wake on
1601 * lan on a particular port */
1602 switch (pdev->device) {
1603 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1604 adapter->eeprom_wol = 0;
1605 break;
1606 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001607 case E1000_DEV_ID_82576_FIBER:
1608 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001609 /* Wake events only supported on port A for dual fiber
1610 * regardless of eeprom setting */
1611 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1612 adapter->eeprom_wol = 0;
1613 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001614 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00001615 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001616 /* if quad port adapter, disable WoL on all but port A */
1617 if (global_quad_port_a != 0)
1618 adapter->eeprom_wol = 0;
1619 else
1620 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1621 /* Reset for multiple quad port adapters */
1622 if (++global_quad_port_a == 4)
1623 global_quad_port_a = 0;
1624 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001625 }
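	/*
	 * Illustrative trace (assuming a single 82576 quad-port board): the
	 * four probes observe global_quad_port_a = 0, 1, 2 and 3 in turn,
	 * so only the first port keeps eeprom_wol and is flagged
	 * IGB_FLAG_QUAD_PORT_A; the counter then wraps to 0 so a second
	 * quad-port board is handled the same way.
	 */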
1626
1627 /* initialize the wol settings based on the eeprom settings */
1628 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001629 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001630
1631 /* reset the hardware with the new settings */
1632 igb_reset(adapter);
1633
1634 /* let the f/w know that the h/w is now under the control of the
1635 * driver. */
1636 igb_get_hw_control(adapter);
1637
Auke Kok9d5c8242008-01-24 02:22:38 -08001638 strcpy(netdev->name, "eth%d");
1639 err = register_netdev(netdev);
1640 if (err)
1641 goto err_register;
1642
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001643 /* carrier off reporting is important to ethtool even BEFORE open */
1644 netif_carrier_off(netdev);
1645
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001646#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001647 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001648 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001649 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001650 igb_setup_dca(adapter);
1651 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001652
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001653#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001654 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1655 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001656 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001657 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00001658 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1659 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001660 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1661 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1662 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1663 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001664 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001665
1666 igb_read_part_num(hw, &part_num);
1667 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1668 (part_num >> 8), (part_num & 0xff));
1669
1670 dev_info(&pdev->dev,
1671 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1672 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001673 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001674 adapter->num_rx_queues, adapter->num_tx_queues);
1675
Auke Kok9d5c8242008-01-24 02:22:38 -08001676 return 0;
1677
1678err_register:
1679 igb_release_hw_control(adapter);
1680err_eeprom:
1681 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001682 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001683
1684 if (hw->flash_address)
1685 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001686err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001687 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001688 iounmap(hw->hw_addr);
1689err_ioremap:
1690 free_netdev(netdev);
1691err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00001692 pci_release_selected_regions(pdev,
1693 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001694err_pci_reg:
1695err_dma:
1696 pci_disable_device(pdev);
1697 return err;
1698}
1699
1700/**
1701 * igb_remove - Device Removal Routine
1702 * @pdev: PCI device information struct
1703 *
1704 * igb_remove is called by the PCI subsystem to alert the driver
1705 * that it should release a PCI device. This could be caused by a
1706 * Hot-Plug event, or because the driver is going to be removed from
1707 * memory.
1708 **/
1709static void __devexit igb_remove(struct pci_dev *pdev)
1710{
1711 struct net_device *netdev = pci_get_drvdata(pdev);
1712 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001713 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001714
1715	/* flush_scheduled_work() may reschedule our watchdog task, so
1716	 * explicitly prevent the watchdog task from being rescheduled */
1717 set_bit(__IGB_DOWN, &adapter->state);
1718 del_timer_sync(&adapter->watchdog_timer);
1719 del_timer_sync(&adapter->phy_info_timer);
1720
1721 flush_scheduled_work();
1722
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001723#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001724 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001725 dev_info(&pdev->dev, "DCA disabled\n");
1726 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001727 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001728 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001729 }
1730#endif
1731
Auke Kok9d5c8242008-01-24 02:22:38 -08001732 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1733 * would have already happened in close and is redundant. */
1734 igb_release_hw_control(adapter);
1735
1736 unregister_netdev(netdev);
1737
Alexander Duyck047e0032009-10-27 15:49:27 +00001738 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001739
Alexander Duyck37680112009-02-19 20:40:30 -08001740#ifdef CONFIG_PCI_IOV
1741 /* reclaim resources allocated to VFs */
1742 if (adapter->vf_data) {
1743 /* disable iov and allow time for transactions to clear */
1744 pci_disable_sriov(pdev);
1745 msleep(500);
1746
1747 kfree(adapter->vf_data);
1748 adapter->vf_data = NULL;
1749 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1750 msleep(100);
1751 dev_info(&pdev->dev, "IOV Disabled\n");
1752 }
1753#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00001754
Alexander Duyck28b07592009-02-06 23:20:31 +00001755 iounmap(hw->hw_addr);
1756 if (hw->flash_address)
1757 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00001758 pci_release_selected_regions(pdev,
1759 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001760
1761 free_netdev(netdev);
1762
Frans Pop19d5afd2009-10-02 10:04:12 -07001763 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001764
Auke Kok9d5c8242008-01-24 02:22:38 -08001765 pci_disable_device(pdev);
1766}
1767
1768/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00001769 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1770 * @adapter: board private structure to initialize
1771 *
1772 * This function initializes the vf specific data storage and then attempts to
1773 * allocate the VFs. The reason for ordering it this way is that it is much
1774 * more expensive time-wise to disable SR-IOV than it is to allocate and free
1775 * the memory for the VFs.
1776 **/
1777static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1778{
1779#ifdef CONFIG_PCI_IOV
1780 struct pci_dev *pdev = adapter->pdev;
1781
1782 if (adapter->vfs_allocated_count > 7)
1783 adapter->vfs_allocated_count = 7;
1784
1785 if (adapter->vfs_allocated_count) {
1786 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1787 sizeof(struct vf_data_storage),
1788 GFP_KERNEL);
1789 /* if allocation failed then we do not support SR-IOV */
1790 if (!adapter->vf_data) {
1791 adapter->vfs_allocated_count = 0;
1792 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1793 "Data Storage\n");
1794 }
1795 }
1796
1797 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1798 kfree(adapter->vf_data);
1799 adapter->vf_data = NULL;
1800#endif /* CONFIG_PCI_IOV */
1801 adapter->vfs_allocated_count = 0;
1802#ifdef CONFIG_PCI_IOV
1803 } else {
1804 unsigned char mac_addr[ETH_ALEN];
1805 int i;
1806 dev_info(&pdev->dev, "%d vfs allocated\n",
1807 adapter->vfs_allocated_count);
1808 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1809 random_ether_addr(mac_addr);
1810 igb_set_vf_mac(adapter, i, mac_addr);
1811 }
1812 }
1813#endif /* CONFIG_PCI_IOV */
1814}
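/*
 * Usage note (illustrative): the VF count originates from the max_vfs
 * module parameter, e.g.
 *
 *	# modprobe igb max_vfs=7
 *
 * igb_probe_vfs() caps the request at 7 VFs per port, allocates one
 * vf_data_storage entry per VF and seeds each VF with a random MAC address.
 */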
1815
Alexander Duyck115f4592009-11-12 18:37:00 +00001816
1817/**
1818 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1819 * @adapter: board private structure to initialize
1820 *
1821 * igb_init_hw_timer initializes the function pointers and values for the
1822 * timer found in hardware.
1823 **/
1824static void igb_init_hw_timer(struct igb_adapter *adapter)
1825{
1826 struct e1000_hw *hw = &adapter->hw;
1827
1828 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00001829 case e1000_82580:
1830 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1831 adapter->cycles.read = igb_read_clock;
1832 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1833 adapter->cycles.mult = 1;
1834 /*
1835		 * The 82580 timesync advances the system timer by 8ns every 8ns,
1836		 * and that value cannot be shifted. Instead we need to shift
1837 * the registers to generate a 64bit timer value. As a result
1838 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
1839 * 24 in order to generate a larger value for synchronization.
1840 */
1841 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
1842 /* disable system timer temporarily by setting bit 31 */
1843 wr32(E1000_TSAUXC, 0x80000000);
1844 wrfl();
1845
1846 /* Set registers so that rollover occurs soon to test this. */
1847 wr32(E1000_SYSTIMR, 0x00000000);
1848 wr32(E1000_SYSTIML, 0x80000000);
1849 wr32(E1000_SYSTIMH, 0x000000FF);
1850 wrfl();
1851
1852 /* enable system timer by clearing bit 31 */
1853 wr32(E1000_TSAUXC, 0x0);
1854 wrfl();
1855
1856 timecounter_init(&adapter->clock,
1857 &adapter->cycles,
1858 ktime_to_ns(ktime_get_real()));
1859 /*
1860 * Synchronize our NIC clock against system wall clock. NIC
1861 * time stamp reading requires ~3us per sample, each sample
1862 * was pretty stable even under load => only require 10
1863 * samples for each offset comparison.
1864 */
1865 memset(&adapter->compare, 0, sizeof(adapter->compare));
1866 adapter->compare.source = &adapter->clock;
1867 adapter->compare.target = ktime_get_real;
1868 adapter->compare.num_samples = 10;
1869 timecompare_update(&adapter->compare, 0);
1870 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00001871 case e1000_82576:
1872 /*
1873 * Initialize hardware timer: we keep it running just in case
1874		 * some program needs it later on.
1875 */
1876 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1877 adapter->cycles.read = igb_read_clock;
1878 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1879 adapter->cycles.mult = 1;
1880		/*
1881		 * Scale the NIC clock cycle by a large factor so that
1882		 * relatively small clock corrections can be added or
1883		 * subtracted at each clock tick. The drawbacks of a large
1884 * factor are a) that the clock register overflows more quickly
1885 * (not such a big deal) and b) that the increment per tick has
1886 * to fit into 24 bits. As a result we need to use a shift of
1887 * 19 so we can fit a value of 16 into the TIMINCA register.
1888 */
1889 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1890 wr32(E1000_TIMINCA,
1891 (1 << E1000_TIMINCA_16NS_SHIFT) |
1892 (16 << IGB_82576_TSYNC_SHIFT));
1893
1894 /* Set registers so that rollover occurs soon to test this. */
1895 wr32(E1000_SYSTIML, 0x00000000);
1896 wr32(E1000_SYSTIMH, 0xFF800000);
1897 wrfl();
1898
1899 timecounter_init(&adapter->clock,
1900 &adapter->cycles,
1901 ktime_to_ns(ktime_get_real()));
1902 /*
1903 * Synchronize our NIC clock against system wall clock. NIC
1904 * time stamp reading requires ~3us per sample, each sample
1905 * was pretty stable even under load => only require 10
1906 * samples for each offset comparison.
1907 */
1908 memset(&adapter->compare, 0, sizeof(adapter->compare));
1909 adapter->compare.source = &adapter->clock;
1910 adapter->compare.target = ktime_get_real;
1911 adapter->compare.num_samples = 10;
1912 timecompare_update(&adapter->compare, 0);
1913 break;
1914 case e1000_82575:
1915 /* 82575 does not support timesync */
1916 default:
1917 break;
1918 }
1919
1920}
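#if 0	/* illustrative sketch, not built: the 82576 cyclecounter math.
	 * With mult = 1 and cycles.shift = IGB_82576_TSYNC_SHIFT (19), the
	 * increment written above is 16 << 19 = 8388608, which fits the
	 * 24-bit TIMINCA increment field (max 16777215). Converting a raw
	 * cycle delta to nanoseconds follows the usual convention: */
static u64 example_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}
#endif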
1921
Alexander Duycka6b623e2009-10-27 23:47:53 +00001922/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001923 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1924 * @adapter: board private structure to initialize
1925 *
1926 * igb_sw_init initializes the Adapter private data structure.
1927 * Fields are initialized based on PCI device information and
1928 * OS network device settings (MTU size).
1929 **/
1930static int __devinit igb_sw_init(struct igb_adapter *adapter)
1931{
1932 struct e1000_hw *hw = &adapter->hw;
1933 struct net_device *netdev = adapter->netdev;
1934 struct pci_dev *pdev = adapter->pdev;
1935
1936 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1937
Alexander Duyck68fd9912008-11-20 00:48:10 -08001938 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1939 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001940 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1941 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1942
Auke Kok9d5c8242008-01-24 02:22:38 -08001943 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1944 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1945
Alexander Duycka6b623e2009-10-27 23:47:53 +00001946#ifdef CONFIG_PCI_IOV
1947 if (hw->mac.type == e1000_82576)
1948 adapter->vfs_allocated_count = max_vfs;
1949
1950#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00001951 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1952
1953 /*
1954	 * if rss_queues > 4, or if more than 6 VFs are going to be
1955	 * allocated while rss_queues > 1, combine the Tx/Rx queues into
1956	 * queue pairs in order to conserve interrupts due to limited supply
1957 */
1958 if ((adapter->rss_queues > 4) ||
1959 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1960 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1961
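	/*
	 * Worked example (illustrative): on an eight-core system with seven
	 * VFs on an 82576, rss_queues > 1 and vfs_allocated_count > 6, so
	 * each queue's Tx and Rx rings share one q_vector (a "queue pair"),
	 * roughly halving the MSI-X vectors the PF consumes.
	 */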
Alexander Duycka6b623e2009-10-27 23:47:53 +00001962 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00001963 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001964 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1965 return -ENOMEM;
1966 }
1967
Alexander Duyck115f4592009-11-12 18:37:00 +00001968 igb_init_hw_timer(adapter);
Alexander Duycka6b623e2009-10-27 23:47:53 +00001969 igb_probe_vfs(adapter);
1970
Auke Kok9d5c8242008-01-24 02:22:38 -08001971 /* Explicitly disable IRQ since the NIC can be in any state. */
1972 igb_irq_disable(adapter);
1973
1974 set_bit(__IGB_DOWN, &adapter->state);
1975 return 0;
1976}
1977
1978/**
1979 * igb_open - Called when a network interface is made active
1980 * @netdev: network interface device structure
1981 *
1982 * Returns 0 on success, negative value on failure
1983 *
1984 * The open entry point is called when a network interface is made
1985 * active by the system (IFF_UP). At this point all resources needed
1986 * for transmit and receive operations are allocated, the interrupt
1987 * handler is registered with the OS, the watchdog timer is started,
1988 * and the stack is notified that the interface is ready.
1989 **/
1990static int igb_open(struct net_device *netdev)
1991{
1992 struct igb_adapter *adapter = netdev_priv(netdev);
1993 struct e1000_hw *hw = &adapter->hw;
1994 int err;
1995 int i;
1996
1997 /* disallow open during test */
1998 if (test_bit(__IGB_TESTING, &adapter->state))
1999 return -EBUSY;
2000
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002001 netif_carrier_off(netdev);
2002
Auke Kok9d5c8242008-01-24 02:22:38 -08002003 /* allocate transmit descriptors */
2004 err = igb_setup_all_tx_resources(adapter);
2005 if (err)
2006 goto err_setup_tx;
2007
2008 /* allocate receive descriptors */
2009 err = igb_setup_all_rx_resources(adapter);
2010 if (err)
2011 goto err_setup_rx;
2012
Nick Nunley88a268c2010-02-17 01:01:59 +00002013 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002014
Auke Kok9d5c8242008-01-24 02:22:38 -08002015 /* before we allocate an interrupt, we must be ready to handle it.
2016 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2017	 * as soon as we call pci_request_irq, so we have to set up our
2018 * clean_rx handler before we do so. */
2019 igb_configure(adapter);
2020
2021 err = igb_request_irq(adapter);
2022 if (err)
2023 goto err_req_irq;
2024
2025 /* From here on the code is the same as igb_up() */
2026 clear_bit(__IGB_DOWN, &adapter->state);
2027
Alexander Duyck047e0032009-10-27 15:49:27 +00002028 for (i = 0; i < adapter->num_q_vectors; i++) {
2029 struct igb_q_vector *q_vector = adapter->q_vector[i];
2030 napi_enable(&q_vector->napi);
2031 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002032
2033 /* Clear any pending interrupts. */
2034 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002035
2036 igb_irq_enable(adapter);
2037
Alexander Duyckd4960302009-10-27 15:53:45 +00002038 /* notify VFs that reset has been completed */
2039 if (adapter->vfs_allocated_count) {
2040 u32 reg_data = rd32(E1000_CTRL_EXT);
2041 reg_data |= E1000_CTRL_EXT_PFRSTD;
2042 wr32(E1000_CTRL_EXT, reg_data);
2043 }
2044
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002045 netif_tx_start_all_queues(netdev);
2046
Alexander Duyck25568a52009-10-27 23:49:59 +00002047 /* start the watchdog. */
2048 hw->mac.get_link_status = 1;
2049 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002050
2051 return 0;
2052
2053err_req_irq:
2054 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002055 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002056 igb_free_all_rx_resources(adapter);
2057err_setup_rx:
2058 igb_free_all_tx_resources(adapter);
2059err_setup_tx:
2060 igb_reset(adapter);
2061
2062 return err;
2063}
2064
2065/**
2066 * igb_close - Disables a network interface
2067 * @netdev: network interface device structure
2068 *
2069 * Returns 0, this is not allowed to fail
2070 *
2071 * The close entry point is called when an interface is de-activated
2072 * by the OS. The hardware is still under the driver's control, but
2073 * needs to be disabled. A global MAC reset is issued to stop the
2074 * hardware, and all transmit and receive resources are freed.
2075 **/
2076static int igb_close(struct net_device *netdev)
2077{
2078 struct igb_adapter *adapter = netdev_priv(netdev);
2079
2080 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2081 igb_down(adapter);
2082
2083 igb_free_irq(adapter);
2084
2085 igb_free_all_tx_resources(adapter);
2086 igb_free_all_rx_resources(adapter);
2087
Auke Kok9d5c8242008-01-24 02:22:38 -08002088 return 0;
2089}
2090
2091/**
2092 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002093 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2094 *
2095 * Return 0 on success, negative on failure
2096 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002097int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002098{
Alexander Duyck80785292009-10-27 15:51:47 +00002099 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002100 int size;
2101
2102 size = sizeof(struct igb_buffer) * tx_ring->count;
2103 tx_ring->buffer_info = vmalloc(size);
2104 if (!tx_ring->buffer_info)
2105 goto err;
2106 memset(tx_ring->buffer_info, 0, size);
2107
2108 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002109 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002110 tx_ring->size = ALIGN(tx_ring->size, 4096);
2111
Alexander Duyck439705e2009-10-27 23:49:20 +00002112 tx_ring->desc = pci_alloc_consistent(pdev,
2113 tx_ring->size,
Auke Kok9d5c8242008-01-24 02:22:38 -08002114 &tx_ring->dma);
2115
2116 if (!tx_ring->desc)
2117 goto err;
2118
Auke Kok9d5c8242008-01-24 02:22:38 -08002119 tx_ring->next_to_use = 0;
2120 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002121 return 0;
2122
2123err:
2124 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00002125 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002126 "Unable to allocate memory for the transmit descriptor ring\n");
2127 return -ENOMEM;
2128}
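#if 0	/* illustrative sketch, not built: the allocation above is paired
	 * with igb_free_tx_resources() as its inverse. */
static int example_tx_ring_lifecycle(struct igb_ring *tx_ring)
{
	int err = igb_setup_tx_resources(tx_ring);

	if (err)
		return err;
	/* ... hand the ring to igb_configure_tx_ring(), run traffic ... */
	igb_free_tx_resources(tx_ring);
	return 0;
}
#endif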
2129
2130/**
2131 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2132 * (Descriptors) for all queues
2133 * @adapter: board private structure
2134 *
2135 * Return 0 on success, negative on failure
2136 **/
2137static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2138{
Alexander Duyck439705e2009-10-27 23:49:20 +00002139 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002140 int i, err = 0;
2141
2142 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002143 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002144 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002145 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002146 "Allocation for Tx Queue %u failed\n", i);
2147 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002148 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002149 break;
2150 }
2151 }
2152
Alexander Duycka99955f2009-11-12 18:37:19 +00002153 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002154 int r_idx = i % adapter->num_tx_queues;
Alexander Duyck3025a442010-02-17 01:02:39 +00002155 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002156 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002157 return err;
2158}
2159
2160/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002161 * igb_setup_tctl - configure the transmit control registers
2162 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002163 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002164void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002165{
Auke Kok9d5c8242008-01-24 02:22:38 -08002166 struct e1000_hw *hw = &adapter->hw;
2167 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002168
Alexander Duyck85b430b2009-10-27 15:50:29 +00002169 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2170 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002171
2172 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002173 tctl = rd32(E1000_TCTL);
2174 tctl &= ~E1000_TCTL_CT;
2175 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2176 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2177
2178 igb_config_collision_dist(hw);
2179
Auke Kok9d5c8242008-01-24 02:22:38 -08002180 /* Enable transmits */
2181 tctl |= E1000_TCTL_EN;
2182
2183 wr32(E1000_TCTL, tctl);
2184}
2185
2186/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002187 * igb_configure_tx_ring - Configure transmit ring after Reset
2188 * @adapter: board private structure
2189 * @ring: tx ring to configure
2190 *
2191 * Configure a transmit ring after a reset.
2192 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002193void igb_configure_tx_ring(struct igb_adapter *adapter,
2194 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002195{
2196 struct e1000_hw *hw = &adapter->hw;
2197 u32 txdctl;
2198 u64 tdba = ring->dma;
2199 int reg_idx = ring->reg_idx;
2200
2201 /* disable the queue */
2202 txdctl = rd32(E1000_TXDCTL(reg_idx));
2203 wr32(E1000_TXDCTL(reg_idx),
2204 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2205 wrfl();
2206 mdelay(10);
2207
2208 wr32(E1000_TDLEN(reg_idx),
2209 ring->count * sizeof(union e1000_adv_tx_desc));
2210 wr32(E1000_TDBAL(reg_idx),
2211 tdba & 0x00000000ffffffffULL);
2212 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2213
Alexander Duyckfce99e32009-10-27 15:51:27 +00002214 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2215 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2216 writel(0, ring->head);
2217 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002218
2219 txdctl |= IGB_TX_PTHRESH;
2220 txdctl |= IGB_TX_HTHRESH << 8;
2221 txdctl |= IGB_TX_WTHRESH << 16;
2222
2223 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2224 wr32(E1000_TXDCTL(reg_idx), txdctl);
2225}
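#if 0	/* illustrative sketch, not built: the TXDCTL layout programmed
	 * above; the prefetch, host and write-back thresholds occupy the
	 * three low bytes of the register. */
static u32 example_txdctl(u8 pthresh, u8 hthresh, u8 wthresh)
{
	return pthresh | (hthresh << 8) | (wthresh << 16) |
	       E1000_TXDCTL_QUEUE_ENABLE;
}
#endif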
2226
2227/**
2228 * igb_configure_tx - Configure transmit Unit after Reset
2229 * @adapter: board private structure
2230 *
2231 * Configure the Tx unit of the MAC after a reset.
2232 **/
2233static void igb_configure_tx(struct igb_adapter *adapter)
2234{
2235 int i;
2236
2237 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002238 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002239}
2240
2241/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002242 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002243 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2244 *
2245 * Returns 0 on success, negative on failure
2246 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002247int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002248{
Alexander Duyck80785292009-10-27 15:51:47 +00002249 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002250 int size, desc_len;
2251
2252 size = sizeof(struct igb_buffer) * rx_ring->count;
2253 rx_ring->buffer_info = vmalloc(size);
2254 if (!rx_ring->buffer_info)
2255 goto err;
2256 memset(rx_ring->buffer_info, 0, size);
2257
2258 desc_len = sizeof(union e1000_adv_rx_desc);
2259
2260 /* Round up to nearest 4K */
2261 rx_ring->size = rx_ring->count * desc_len;
2262 rx_ring->size = ALIGN(rx_ring->size, 4096);
2263
2264 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2265 &rx_ring->dma);
2266
2267 if (!rx_ring->desc)
2268 goto err;
2269
2270 rx_ring->next_to_clean = 0;
2271 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002272
Auke Kok9d5c8242008-01-24 02:22:38 -08002273 return 0;
2274
2275err:
2276 vfree(rx_ring->buffer_info);
Alexander Duyck439705e2009-10-27 23:49:20 +00002277 rx_ring->buffer_info = NULL;
Alexander Duyck80785292009-10-27 15:51:47 +00002278 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002279 "the receive descriptor ring\n");
2280 return -ENOMEM;
2281}
2282
2283/**
2284 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2285 * (Descriptors) for all queues
2286 * @adapter: board private structure
2287 *
2288 * Return 0 on success, negative on failure
2289 **/
2290static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2291{
Alexander Duyck439705e2009-10-27 23:49:20 +00002292 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002293 int i, err = 0;
2294
2295 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002296 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002297 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002298 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002299 "Allocation for Rx Queue %u failed\n", i);
2300 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002301 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002302 break;
2303 }
2304 }
2305
2306 return err;
2307}
2308
2309/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002310 * igb_setup_mrqc - configure the multiple receive queue control registers
2311 * @adapter: Board private structure
2312 **/
2313static void igb_setup_mrqc(struct igb_adapter *adapter)
2314{
2315 struct e1000_hw *hw = &adapter->hw;
2316 u32 mrqc, rxcsum;
2317 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2318 union e1000_reta {
2319 u32 dword;
2320 u8 bytes[4];
2321 } reta;
2322 static const u8 rsshash[40] = {
2323 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2324 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2325 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2326 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2327
2328 /* Fill out hash function seeds */
2329 for (j = 0; j < 10; j++) {
2330 u32 rsskey = rsshash[(j * 4)];
2331 rsskey |= rsshash[(j * 4) + 1] << 8;
2332 rsskey |= rsshash[(j * 4) + 2] << 16;
2333 rsskey |= rsshash[(j * 4) + 3] << 24;
2334 array_wr32(E1000_RSSRK(0), j, rsskey);
2335 }
2336
Alexander Duycka99955f2009-11-12 18:37:19 +00002337 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002338
2339 if (adapter->vfs_allocated_count) {
2340		/* 82575 and 82576 support 2 RSS queues for VMDq */
2341 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00002342 case e1000_82580:
2343 num_rx_queues = 1;
2344 shift = 0;
2345 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002346 case e1000_82576:
2347 shift = 3;
2348 num_rx_queues = 2;
2349 break;
2350 case e1000_82575:
2351 shift = 2;
2352 shift2 = 6;
2353 default:
2354 break;
2355 }
2356 } else {
2357 if (hw->mac.type == e1000_82575)
2358 shift = 6;
2359 }
2360
2361 for (j = 0; j < (32 * 4); j++) {
2362 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2363 if (shift2)
2364 reta.bytes[j & 3] |= num_rx_queues << shift2;
2365 if ((j & 3) == 3)
2366 wr32(E1000_RETA(j >> 2), reta.dword);
2367 }
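	/*
	 * Worked example (illustrative): with num_rx_queues = 2 and no VFs
	 * (shift = 0) the 128 RETA entries alternate 0, 1, 0, 1, ... so RSS
	 * hash results spread evenly over both queues. On an 82576 carrying
	 * VFs (shift = 3) the entries become 0 and 8, the two PF queues
	 * spaced by the VMDq pool stride.
	 */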
2368
2369 /*
2370 * Disable raw packet checksumming so that RSS hash is placed in
2371 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2372 * offloads as they are enabled by default
2373 */
2374 rxcsum = rd32(E1000_RXCSUM);
2375 rxcsum |= E1000_RXCSUM_PCSD;
2376
2377 if (adapter->hw.mac.type >= e1000_82576)
2378 /* Enable Receive Checksum Offload for SCTP */
2379 rxcsum |= E1000_RXCSUM_CRCOFL;
2380
2381 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2382 wr32(E1000_RXCSUM, rxcsum);
2383
2384 /* If VMDq is enabled then we set the appropriate mode for that, else
2385 * we default to RSS so that an RSS hash is calculated per packet even
2386 * if we are only using one queue */
2387 if (adapter->vfs_allocated_count) {
2388 if (hw->mac.type > e1000_82575) {
2389 /* Set the default pool for the PF's first queue */
2390 u32 vtctl = rd32(E1000_VT_CTL);
2391 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2392 E1000_VT_CTL_DISABLE_DEF_POOL);
2393 vtctl |= adapter->vfs_allocated_count <<
2394 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2395 wr32(E1000_VT_CTL, vtctl);
2396 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002397 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002398 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2399 else
2400 mrqc = E1000_MRQC_ENABLE_VMDQ;
2401 } else {
2402 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2403 }
2404 igb_vmm_control(adapter);
2405
2406 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2407 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2408 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2409 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2410 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2411 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2412 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2413 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2414
2415 wr32(E1000_MRQC, mrqc);
2416}
2417
2418/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002419 * igb_setup_rctl - configure the receive control registers
2420 * @adapter: Board private structure
2421 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002422void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002423{
2424 struct e1000_hw *hw = &adapter->hw;
2425 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002426
2427 rctl = rd32(E1000_RCTL);
2428
2429 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002430 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002431
Alexander Duyck69d728b2008-11-25 01:04:03 -08002432 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002433 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002434
Auke Kok87cb7e82008-07-08 15:08:29 -07002435 /*
2436 * enable stripping of CRC. It's unlikely this will break BMC
2437 * redirection as it did with e1000. Newer features require
2438 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002439 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002440 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002441
Alexander Duyck559e9c42009-10-27 23:52:50 +00002442 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002443 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002444
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002445	/* enable LPE to allow reception of packets larger than the standard frame size */
2446 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002447
Alexander Duyck952f72a2009-10-27 15:51:07 +00002448 /* disable queue 0 to prevent tail write w/o re-config */
2449 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002450
Alexander Duycke1739522009-02-19 20:39:44 -08002451 /* Attention!!! For SR-IOV PF driver operations you must enable
2452	 * queue drop for all VF and PF queues to prevent head-of-line blocking
2453	 * if an untrusted VF does not provide descriptors to hardware.
2454 */
2455 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002456 /* set all queue drop enable bits */
2457 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002458 }
2459
Auke Kok9d5c8242008-01-24 02:22:38 -08002460 wr32(E1000_RCTL, rctl);
2461}
2462
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002463static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2464 int vfn)
2465{
2466 struct e1000_hw *hw = &adapter->hw;
2467 u32 vmolr;
2468
2469	/* if it isn't the PF, check to see if VFs are enabled and
2470	 * increase the size to support vlan tags */
2471 if (vfn < adapter->vfs_allocated_count &&
2472 adapter->vf_data[vfn].vlans_enabled)
2473 size += VLAN_TAG_SIZE;
2474
2475 vmolr = rd32(E1000_VMOLR(vfn));
2476 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2477 vmolr |= size | E1000_VMOLR_LPE;
2478 wr32(E1000_VMOLR(vfn), vmolr);
2479
2480 return 0;
2481}
2482
Auke Kok9d5c8242008-01-24 02:22:38 -08002483/**
Alexander Duycke1739522009-02-19 20:39:44 -08002484 * igb_rlpml_set - set maximum receive packet size
2485 * @adapter: board private structure
2486 *
2487 * Configure maximum receivable packet size.
2488 **/
2489static void igb_rlpml_set(struct igb_adapter *adapter)
2490{
2491 u32 max_frame_size = adapter->max_frame_size;
2492 struct e1000_hw *hw = &adapter->hw;
2493 u16 pf_id = adapter->vfs_allocated_count;
2494
2495 if (adapter->vlgrp)
2496 max_frame_size += VLAN_TAG_SIZE;
2497
2498	/* if VFs are enabled, we set RLPML to the largest possible request
2499	 * size and set the VMOLR RLPML to the size we need */
2500 if (pf_id) {
2501 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002502 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002503 }
2504
2505 wr32(E1000_RLPML, max_frame_size);
2506}
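/*
 * Worked example (illustrative): with a 1500-byte MTU, max_frame_size is
 * 1518 and grows to 1522 once a vlan group is registered. Without VFs that
 * value is written to E1000_RLPML directly; with VFs enabled, the global
 * limit is opened to MAX_JUMBO_FRAME_SIZE and the per-pool limit is
 * enforced through VMOLR.RLPML by igb_set_vf_rlpml() instead.
 */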
2507
Williams, Mitch A8151d292010-02-10 01:44:24 +00002508static inline void igb_set_vmolr(struct igb_adapter *adapter,
2509 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002510{
2511 struct e1000_hw *hw = &adapter->hw;
2512 u32 vmolr;
2513
2514 /*
2515	 * This register exists only on 82576 and newer, so if we are on
2516	 * older hardware we should exit and do nothing
2517 */
2518 if (hw->mac.type < e1000_82576)
2519 return;
2520
2521 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00002522 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2523 if (aupe)
2524 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2525 else
2526 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002527
2528 /* clear all bits that might not be set */
2529 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2530
Alexander Duycka99955f2009-11-12 18:37:19 +00002531 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002532 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2533 /*
2534 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2535 * multicast packets
2536 */
2537 if (vfn <= adapter->vfs_allocated_count)
2538 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2539
2540 wr32(E1000_VMOLR(vfn), vmolr);
2541}
2542
Alexander Duycke1739522009-02-19 20:39:44 -08002543/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002544 * igb_configure_rx_ring - Configure a receive ring after Reset
2545 * @adapter: board private structure
2546 * @ring: receive ring to be configured
2547 *
2548 * Configure the Rx unit of the MAC after a reset.
2549 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002550void igb_configure_rx_ring(struct igb_adapter *adapter,
2551 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002552{
2553 struct e1000_hw *hw = &adapter->hw;
2554 u64 rdba = ring->dma;
2555 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002556 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002557
2558 /* disable the queue */
2559 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2560 wr32(E1000_RXDCTL(reg_idx),
2561 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2562
2563 /* Set DMA base address registers */
2564 wr32(E1000_RDBAL(reg_idx),
2565 rdba & 0x00000000ffffffffULL);
2566 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2567 wr32(E1000_RDLEN(reg_idx),
2568 ring->count * sizeof(union e1000_adv_rx_desc));
2569
2570 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002571 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2572 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2573 writel(0, ring->head);
2574 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002575
Alexander Duyck952f72a2009-10-27 15:51:07 +00002576 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002577 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2578 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002579 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2580#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2581 srrctl |= IGB_RXBUFFER_16384 >>
2582 E1000_SRRCTL_BSIZEPKT_SHIFT;
2583#else
2584 srrctl |= (PAGE_SIZE / 2) >>
2585 E1000_SRRCTL_BSIZEPKT_SHIFT;
2586#endif
2587 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2588 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002589 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002590 E1000_SRRCTL_BSIZEPKT_SHIFT;
2591 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2592 }
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00002593 /* Only set Drop Enable if we are supporting multiple queues */
2594 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2595 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002596
2597 wr32(E1000_SRRCTL(reg_idx), srrctl);
2598
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002599 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00002600 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002601
Alexander Duyck85b430b2009-10-27 15:50:29 +00002602 /* enable receive descriptor fetching */
2603 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2604 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2605 rxdctl &= 0xFFF00000;
2606 rxdctl |= IGB_RX_PTHRESH;
2607 rxdctl |= IGB_RX_HTHRESH << 8;
2608 rxdctl |= IGB_RX_WTHRESH << 16;
2609 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2610}
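#if 0	/* illustrative sketch, not built: SRRCTL encoding for the
	 * one-buffer descriptor type used above, assuming a hypothetical
	 * 2048-byte rx_buffer_len (BSIZEPKT is in 1 KB units). */
static u32 example_srrctl_onebuf(u32 rx_buffer_len)
{
	u32 srrctl = ALIGN(rx_buffer_len, 1024) >>
		     E1000_SRRCTL_BSIZEPKT_SHIFT;

	return srrctl | E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
#endif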
2611
2612/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002613 * igb_configure_rx - Configure receive Unit after Reset
2614 * @adapter: board private structure
2615 *
2616 * Configure the Rx unit of the MAC after a reset.
2617 **/
2618static void igb_configure_rx(struct igb_adapter *adapter)
2619{
Hannes Eder91075842009-02-18 19:36:04 -08002620 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002621
Alexander Duyck68d480c2009-10-05 06:33:08 +00002622 /* set UTA to appropriate mode */
2623 igb_set_uta(adapter);
2624
Alexander Duyck26ad9172009-10-05 06:32:49 +00002625 /* set the correct pool for the PF default MAC address in entry 0 */
2626 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2627 adapter->vfs_allocated_count);
2628
Alexander Duyck06cf2662009-10-27 15:53:25 +00002629 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2630 * the Base and Length of the Rx Descriptor Ring */
2631 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002632 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002633}
2634
2635/**
2636 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002637 * @tx_ring: Tx descriptor ring for a specific queue
2638 *
2639 * Free all transmit software resources
2640 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002641void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002642{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002643 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002644
2645 vfree(tx_ring->buffer_info);
2646 tx_ring->buffer_info = NULL;
2647
Alexander Duyck439705e2009-10-27 23:49:20 +00002648 /* if not set, then don't free */
2649 if (!tx_ring->desc)
2650 return;
2651
Alexander Duyck80785292009-10-27 15:51:47 +00002652 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2653 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002654
2655 tx_ring->desc = NULL;
2656}
2657
2658/**
2659 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2660 * @adapter: board private structure
2661 *
2662 * Free all transmit software resources
2663 **/
2664static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2665{
2666 int i;
2667
2668 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002669 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002670}
2671
Alexander Duyckb1a436c2009-10-27 15:54:43 +00002672void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2673 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002674{
Alexander Duyck6366ad32009-12-02 16:47:18 +00002675 if (buffer_info->dma) {
2676 if (buffer_info->mapped_as_page)
2677 pci_unmap_page(tx_ring->pdev,
2678 buffer_info->dma,
2679 buffer_info->length,
2680 PCI_DMA_TODEVICE);
2681 else
2682 pci_unmap_single(tx_ring->pdev,
2683 buffer_info->dma,
2684 buffer_info->length,
2685 PCI_DMA_TODEVICE);
2686 buffer_info->dma = 0;
2687 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002688 if (buffer_info->skb) {
2689 dev_kfree_skb_any(buffer_info->skb);
2690 buffer_info->skb = NULL;
2691 }
2692 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00002693 buffer_info->length = 0;
2694 buffer_info->next_to_watch = 0;
2695 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002696}
2697
2698/**
2699 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002700 * @tx_ring: ring to be cleaned
2701 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002702static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002703{
2704 struct igb_buffer *buffer_info;
2705 unsigned long size;
2706 unsigned int i;
2707
2708 if (!tx_ring->buffer_info)
2709 return;
2710 /* Free all the Tx ring sk_buffs */
2711
2712 for (i = 0; i < tx_ring->count; i++) {
2713 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002714 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002715 }
2716
2717 size = sizeof(struct igb_buffer) * tx_ring->count;
2718 memset(tx_ring->buffer_info, 0, size);
2719
2720 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08002721 memset(tx_ring->desc, 0, tx_ring->size);
2722
2723 tx_ring->next_to_use = 0;
2724 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002725}
2726
2727/**
2728 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2729 * @adapter: board private structure
2730 **/
2731static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2732{
2733 int i;
2734
2735 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002736 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002737}
2738
2739/**
2740 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002741 * @rx_ring: ring to clean the resources from
2742 *
2743 * Free all receive software resources
2744 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002745void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002746{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002747 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002748
2749 vfree(rx_ring->buffer_info);
2750 rx_ring->buffer_info = NULL;
2751
Alexander Duyck439705e2009-10-27 23:49:20 +00002752 /* if not set, then don't free */
2753 if (!rx_ring->desc)
2754 return;
2755
Alexander Duyck80785292009-10-27 15:51:47 +00002756 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2757 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002758
2759 rx_ring->desc = NULL;
2760}
2761
2762/**
2763 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2764 * @adapter: board private structure
2765 *
2766 * Free all receive software resources
2767 **/
2768static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2769{
2770 int i;
2771
2772 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002773 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002774}
2775
2776/**
2777 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002778 * @rx_ring: ring to free buffers from
2779 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002780static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002781{
2782 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002783 unsigned long size;
2784 unsigned int i;
2785
2786 if (!rx_ring->buffer_info)
2787 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00002788
Auke Kok9d5c8242008-01-24 02:22:38 -08002789 /* Free all the Rx ring sk_buffs */
2790 for (i = 0; i < rx_ring->count; i++) {
2791 buffer_info = &rx_ring->buffer_info[i];
2792 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002793 pci_unmap_single(rx_ring->pdev,
2794 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002795 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002796 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002797 buffer_info->dma = 0;
2798 }
2799
2800 if (buffer_info->skb) {
2801 dev_kfree_skb(buffer_info->skb);
2802 buffer_info->skb = NULL;
2803 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002804 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002805 pci_unmap_page(rx_ring->pdev,
2806 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002807 PAGE_SIZE / 2,
2808 PCI_DMA_FROMDEVICE);
2809 buffer_info->page_dma = 0;
2810 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002811 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002812 put_page(buffer_info->page);
2813 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002814 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002815 }
2816 }
2817
Auke Kok9d5c8242008-01-24 02:22:38 -08002818 size = sizeof(struct igb_buffer) * rx_ring->count;
2819 memset(rx_ring->buffer_info, 0, size);
2820
2821 /* Zero out the descriptor ring */
2822 memset(rx_ring->desc, 0, rx_ring->size);
2823
2824 rx_ring->next_to_clean = 0;
2825 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002826}
2827
2828/**
2829 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2830 * @adapter: board private structure
2831 **/
2832static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2833{
2834 int i;
2835
2836 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002837 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002838}
2839
2840/**
2841 * igb_set_mac - Change the Ethernet Address of the NIC
2842 * @netdev: network interface device structure
2843 * @p: pointer to an address structure
2844 *
2845 * Returns 0 on success, negative on failure
2846 **/
2847static int igb_set_mac(struct net_device *netdev, void *p)
2848{
2849 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002850 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002851 struct sockaddr *addr = p;
2852
2853 if (!is_valid_ether_addr(addr->sa_data))
2854 return -EADDRNOTAVAIL;
2855
2856 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002857 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002858
Alexander Duyck26ad9172009-10-05 06:32:49 +00002859 /* set the correct pool for the new PF MAC address in entry 0 */
2860 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2861 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002862
Auke Kok9d5c8242008-01-24 02:22:38 -08002863 return 0;
2864}
2865
2866/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002867 * igb_write_mc_addr_list - write multicast addresses to MTA
2868 * @netdev: network interface device structure
2869 *
2870 * Writes multicast address list to the MTA hash table.
2871 * Returns: -ENOMEM on failure
2872 * 0 on no addresses written
2873 * X on writing X addresses to MTA
2874 **/
2875static int igb_write_mc_addr_list(struct net_device *netdev)
2876{
2877 struct igb_adapter *adapter = netdev_priv(netdev);
2878 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko48e2f182010-02-22 09:22:26 +00002879 struct dev_mc_list *mc_ptr;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002880 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002881 int i;
2882
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002883 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002884 /* nothing to program, so clear mc list */
2885 igb_update_mc_addr_list(hw, NULL, 0);
2886 igb_restore_vf_multicasts(adapter);
2887 return 0;
2888 }
2889
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002890	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002891 if (!mta_list)
2892 return -ENOMEM;
2893
Alexander Duyck68d480c2009-10-05 06:33:08 +00002894 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00002895 i = 0;
2896 netdev_for_each_mc_addr(mc_ptr, netdev)
2897 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002898
Alexander Duyck68d480c2009-10-05 06:33:08 +00002899 igb_update_mc_addr_list(hw, mta_list, i);
2900 kfree(mta_list);
2901
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002902 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002903}
2904
2905/**
2906 * igb_write_uc_addr_list - write unicast addresses to RAR table
2907 * @netdev: network interface device structure
2908 *
2909 * Writes unicast address list to the RAR table.
2910 * Returns: -ENOMEM on failure/insufficient address space
2911 * 0 on no addresses written
2912 * X on writing X addresses to the RAR table
2913 **/
2914static int igb_write_uc_addr_list(struct net_device *netdev)
2915{
2916 struct igb_adapter *adapter = netdev_priv(netdev);
2917 struct e1000_hw *hw = &adapter->hw;
2918 unsigned int vfn = adapter->vfs_allocated_count;
2919 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2920 int count = 0;
2921
2922 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002923 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00002924 return -ENOMEM;
2925
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002926 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002927 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002928
2929 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002930 if (!rar_entries)
2931 break;
2932 igb_rar_set_qsel(adapter, ha->addr,
2933 rar_entries--,
2934 vfn);
2935 count++;
2936 }
2937 }
2938 /* write the addresses in reverse order to avoid write combining */
2939 for (; rar_entries > 0 ; rar_entries--) {
2940 wr32(E1000_RAH(rar_entries), 0);
2941 wr32(E1000_RAL(rar_entries), 0);
2942 }
2943 wrfl();
2944
2945 return count;
2946}
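/*
 * Editor's sketch (not part of the original driver) of the RAR
 * accounting used above. Entry 0 always holds the PF MAC address and
 * the top vfs_allocated_count entries are reserved one per VF, so the
 * remainder is what igb_write_uc_addr_list() may consume. For example,
 * assuming a part with 24 RAR entries and 7 VFs, 24 - (7 + 1) = 16
 * slots remain for extra unicast filters before the caller has to
 * fall back to unicast promiscuous mode.
 */
static inline unsigned int igb_available_rars_sketch(struct e1000_hw *hw,
						     unsigned int vfn)
{
	/* entry 0 is the PF MAC; the top vfn entries belong to the VFs */
	return hw->mac.rar_entry_count - (vfn + 1);
}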
2947
2948/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002949 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08002950 * @netdev: network interface device structure
2951 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002952 * The set_rx_mode entry point is called whenever the unicast or multicast
2953 * address lists or the network interface flags are updated. This routine is
2954 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08002955 * promiscuous mode, and all-multi behavior.
2956 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002957static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002958{
2959 struct igb_adapter *adapter = netdev_priv(netdev);
2960 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002961 unsigned int vfn = adapter->vfs_allocated_count;
2962 u32 rctl, vmolr = 0;
2963 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08002964
2965 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08002966 rctl = rd32(E1000_RCTL);
2967
Alexander Duyck68d480c2009-10-05 06:33:08 +00002968	/* clear the affected bits */
2969 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2970
Patrick McHardy746b9f02008-07-16 20:15:45 -07002971 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002972 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002973 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002974 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002975 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002976 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002977 vmolr |= E1000_VMOLR_MPME;
2978 } else {
2979 /*
2980			 * Write addresses to the MTA; if the attempt fails
2981			 * then we should just turn on promiscuous mode so
2982			 * that we can at least receive multicast traffic
2983 */
2984 count = igb_write_mc_addr_list(netdev);
2985 if (count < 0) {
2986 rctl |= E1000_RCTL_MPE;
2987 vmolr |= E1000_VMOLR_MPME;
2988 } else if (count) {
2989 vmolr |= E1000_VMOLR_ROMPE;
2990 }
2991 }
2992 /*
2993		 * Write addresses to available RAR registers; if there is not
2994		 * sufficient space to store all the addresses, then enable
2995		 * unicast promiscuous mode
2996 */
2997 count = igb_write_uc_addr_list(netdev);
2998 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002999 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003000 vmolr |= E1000_VMOLR_ROPE;
3001 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003002 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003003 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003004 wr32(E1000_RCTL, rctl);
3005
Alexander Duyck68d480c2009-10-05 06:33:08 +00003006 /*
3007 * In order to support SR-IOV and eventually VMDq it is necessary to set
3008 * the VMOLR to enable the appropriate modes. Without this workaround
3009 * we will have issues with VLAN tag stripping not being done for frames
3010 * that are only arriving because we are the default pool
3011 */
3012 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003013 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003014
Alexander Duyck68d480c2009-10-05 06:33:08 +00003015 vmolr |= rd32(E1000_VMOLR(vfn)) &
3016 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3017 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003018 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003019}
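/*
 * Editor's summary of the mode selection above (RCTL is the global
 * receive control register, VMOLR the per-pool offload register):
 *
 *   IFF_PROMISC         -> RCTL UPE|MPE, VMOLR ROPE|MPME
 *   IFF_ALLMULTI        -> RCTL MPE, VMOLR MPME
 *   MTA write fails     -> RCTL MPE, VMOLR MPME (multicast fallback)
 *   RAR table overflows -> RCTL UPE, VMOLR ROPE (unicast fallback)
 *   otherwise           -> exact/hash filtering, with VMOLR ROMPE when
 *                          any multicast addresses were written
 *
 * VLAN filtering (RCTL VFE) stays enabled in every non-promiscuous
 * mode.
 */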
3020
3021/* Need to wait a few seconds after link up to get diagnostic information from
3022 * the phy */
3023static void igb_update_phy_info(unsigned long data)
3024{
3025 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003026 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003027}
3028
3029/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003030 * igb_has_link - check shared code for link and determine up/down
3031 * @adapter: pointer to driver private info
3032 **/
Nick Nunley31455352010-02-17 01:01:21 +00003033bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003034{
3035 struct e1000_hw *hw = &adapter->hw;
3036 bool link_active = false;
3037 s32 ret_val = 0;
3038
3039 /* get_link_status is set on LSC (link status) interrupt or
3040 * rx sequence error interrupt. get_link_status will stay
3041	 * true until the e1000_check_for_link establishes link
3042 * for copper adapters ONLY
3043 */
3044 switch (hw->phy.media_type) {
3045 case e1000_media_type_copper:
3046 if (hw->mac.get_link_status) {
3047 ret_val = hw->mac.ops.check_for_link(hw);
3048 link_active = !hw->mac.get_link_status;
3049 } else {
3050 link_active = true;
3051 }
3052 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003053 case e1000_media_type_internal_serdes:
3054 ret_val = hw->mac.ops.check_for_link(hw);
3055 link_active = hw->mac.serdes_has_link;
3056 break;
3057 default:
3058 case e1000_media_type_unknown:
3059 break;
3060 }
3061
3062 return link_active;
3063}
3064
3065/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003066 * igb_watchdog - Timer Call-back
3067 * @data: pointer to adapter cast into an unsigned long
3068 **/
3069static void igb_watchdog(unsigned long data)
3070{
3071 struct igb_adapter *adapter = (struct igb_adapter *)data;
3072 /* Do the rest outside of interrupt context */
3073 schedule_work(&adapter->watchdog_task);
3074}
3075
3076static void igb_watchdog_task(struct work_struct *work)
3077{
3078 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003079 struct igb_adapter,
3080 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003081 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003082 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003083 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003084 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003085
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003086 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003087 if (link) {
3088 if (!netif_carrier_ok(netdev)) {
3089 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003090 hw->mac.ops.get_speed_and_duplex(hw,
3091 &adapter->link_speed,
3092 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003093
3094 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003095 /* Links status message must follow this format */
3096 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003097 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003098 netdev->name,
3099 adapter->link_speed,
3100 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003101 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003102 ((ctrl & E1000_CTRL_TFCE) &&
3103 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3104 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3105 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003106
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003107 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003108 adapter->tx_timeout_factor = 1;
3109 switch (adapter->link_speed) {
3110 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003111 adapter->tx_timeout_factor = 14;
3112 break;
3113 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003114 /* maybe add some timeout factor ? */
3115 break;
3116 }
3117
3118 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003119
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003120 igb_ping_all_vfs(adapter);
3121
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003122 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003123 if (!test_bit(__IGB_DOWN, &adapter->state))
3124 mod_timer(&adapter->phy_info_timer,
3125 round_jiffies(jiffies + 2 * HZ));
3126 }
3127 } else {
3128 if (netif_carrier_ok(netdev)) {
3129 adapter->link_speed = 0;
3130 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003131 /* Links status message must follow this format */
3132 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3133 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003134 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003135
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003136 igb_ping_all_vfs(adapter);
3137
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003138 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003139 if (!test_bit(__IGB_DOWN, &adapter->state))
3140 mod_timer(&adapter->phy_info_timer,
3141 round_jiffies(jiffies + 2 * HZ));
3142 }
3143 }
3144
Auke Kok9d5c8242008-01-24 02:22:38 -08003145 igb_update_stats(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003146
Alexander Duyckdbabb062009-11-12 18:38:16 +00003147 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003148 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003149 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003150 /* We've lost link, so the controller stops DMA,
3151 * but we've got queued Tx work that's never going
3152 * to get done, so reset controller to flush Tx.
3153 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003154 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3155 adapter->tx_timeout_count++;
3156 schedule_work(&adapter->reset_task);
3157 /* return immediately since reset is imminent */
3158 return;
3159 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003160 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003161
Alexander Duyckdbabb062009-11-12 18:38:16 +00003162 /* Force detection of hung controller every watchdog period */
3163 tx_ring->detect_tx_hung = true;
3164 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003165
Auke Kok9d5c8242008-01-24 02:22:38 -08003166 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003167 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003168 u32 eics = 0;
3169 for (i = 0; i < adapter->num_q_vectors; i++) {
3170 struct igb_q_vector *q_vector = adapter->q_vector[i];
3171 eics |= q_vector->eims_value;
3172 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003173 wr32(E1000_EICS, eics);
3174 } else {
3175 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3176 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003177
Auke Kok9d5c8242008-01-24 02:22:38 -08003178 /* Reset the timer */
3179 if (!test_bit(__IGB_DOWN, &adapter->state))
3180 mod_timer(&adapter->watchdog_timer,
3181 round_jiffies(jiffies + 2 * HZ));
3182}
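/*
 * Editor's note: writing the OR of the vectors' eims_value bits to
 * EICS raises a software-triggered interrupt on every active MSI-X
 * vector, so each ring gets polled at least once per watchdog tick
 * even when traffic has gone idle; the legacy path achieves the same
 * via ICS with the RXDMT0 cause.
 */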
3183
3184enum latency_range {
3185 lowest_latency = 0,
3186 low_latency = 1,
3187 bulk_latency = 2,
3188 latency_invalid = 255
3189};
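/*
 * Editor's sketch (not part of the original driver): the ranges above
 * map onto the ITR values chosen in igb_set_itr() - 56, 196 and 980
 * ticks. Assuming the hardware counts ITR in ~256 ns units (inferred
 * from the "aka" comments below, e.g. 196 * 256 ns ~= 50 us ~= 20,000
 * ints/sec), the conversion looks like this:
 */
static inline u32 igb_itr_ticks_to_ints_per_sec_sketch(u32 itr_ticks)
{
	/* one second is roughly 3,906,250 ticks of 256 ns */
	return itr_ticks ? 3906250 / itr_ticks : 0;
}
/* e.g. 56 -> ~70k, 196 -> ~20k, 980 -> ~4k interrupts per second */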
3190
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003191/**
3192 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3193 *
3194 * Stores a new ITR value based strictly on packet size. This
3195 * algorithm is less sophisticated than that used in igb_update_itr,
3196 * due to the difficulty of synchronizing statistics across multiple
3197 * receive rings. The divisors and thresholds used by this function
3198 * were determined based on theoretical maximum wire speed and testing
3199 * data, in order to minimize response time while increasing bulk
3200 * throughput.
3201 * This functionality is controlled by the InterruptThrottleRate module
3202 * parameter (see igb_param.c)
3203 * NOTE: This function is called only when operating in a multiqueue
3204 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003205 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003206 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003207static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003208{
Alexander Duyck047e0032009-10-27 15:49:27 +00003209 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003210 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003211 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -08003212
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003213 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3214	 * ints/sec - ITR timer value of 976 ticks.
3215 */
3216 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003217 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003218 goto set_itr_val;
3219 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003220
3221 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3222 struct igb_ring *ring = q_vector->rx_ring;
3223 avg_wire_size = ring->total_bytes / ring->total_packets;
3224 }
3225
3226 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3227 struct igb_ring *ring = q_vector->tx_ring;
3228 avg_wire_size = max_t(u32, avg_wire_size,
3229 (ring->total_bytes /
3230 ring->total_packets));
3231 }
3232
3233 /* if avg_wire_size isn't set no work was done */
3234 if (!avg_wire_size)
3235 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003236
3237 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3238 avg_wire_size += 24;
3239
3240 /* Don't starve jumbo frames */
3241 avg_wire_size = min(avg_wire_size, 3000);
3242
3243 /* Give a little boost to mid-size frames */
3244 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3245 new_val = avg_wire_size / 3;
3246 else
3247 new_val = avg_wire_size / 2;
3248
Nick Nunleyabe1c362010-02-17 01:03:19 +00003249 /* when in itr mode 3 do not exceed 20K ints/sec */
3250 if (adapter->rx_itr_setting == 3 && new_val < 196)
3251 new_val = 196;
3252
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003253set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003254 if (new_val != q_vector->itr_val) {
3255 q_vector->itr_val = new_val;
3256 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003257 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003258clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003259 if (q_vector->rx_ring) {
3260 q_vector->rx_ring->total_bytes = 0;
3261 q_vector->rx_ring->total_packets = 0;
3262 }
3263 if (q_vector->tx_ring) {
3264 q_vector->tx_ring->total_bytes = 0;
3265 q_vector->tx_ring->total_packets = 0;
3266 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003267}
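/*
 * Editor's worked example of the sizing rule above (tick size as per
 * the ~256 ns assumption noted at the latency_range enum): a ring that
 * moved 40 packets / 23,040 bytes in one interval has an average wire
 * size of 576 bytes, plus 24 for CRC/preamble/gap = 600. That lands in
 * the mid-size band (300..1200), so new_val = 600 / 3 = 200 ticks,
 * roughly 51 us or ~19.5k ints/sec; itr mode 3 would then clamp the
 * result to no more than ~20k ints/sec (at least 196 ticks).
 */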
3268
3269/**
3270 * igb_update_itr - update the dynamic ITR value based on statistics
3271 * Stores a new ITR value based on packets and byte
3272 * counts during the last interrupt. The advantage of per interrupt
3273 * computation is faster updates and more accurate ITR for the current
3274 * traffic pattern. Constants in this function were computed
3275 * based on theoretical maximum wire speed and thresholds were set based
3276 * on testing data as well as attempting to minimize response time
3277 * while increasing bulk throughput.
3278 * this functionality is controlled by the InterruptThrottleRate module
3279 * parameter (see igb_param.c)
3280 * NOTE: These calculations are only valid when operating in a single-
3281 * queue environment.
3282 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003283 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003284 * @packets: the number of packets during this measurement interval
3285 * @bytes: the number of bytes during this measurement interval
3286 **/
3287static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3288 int packets, int bytes)
3289{
3290 unsigned int retval = itr_setting;
3291
3292 if (packets == 0)
3293 goto update_itr_done;
3294
3295 switch (itr_setting) {
3296 case lowest_latency:
3297 /* handle TSO and jumbo frames */
3298 if (bytes/packets > 8000)
3299 retval = bulk_latency;
3300 else if ((packets < 5) && (bytes > 512))
3301 retval = low_latency;
3302 break;
3303 case low_latency: /* 50 usec aka 20000 ints/s */
3304 if (bytes > 10000) {
3305 /* this if handles the TSO accounting */
3306 if (bytes/packets > 8000) {
3307 retval = bulk_latency;
3308 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3309 retval = bulk_latency;
3310		} else if (packets > 35) {
3311 retval = lowest_latency;
3312 }
3313 } else if (bytes/packets > 2000) {
3314 retval = bulk_latency;
3315 } else if (packets <= 2 && bytes < 512) {
3316 retval = lowest_latency;
3317 }
3318 break;
3319 case bulk_latency: /* 250 usec aka 4000 ints/s */
3320 if (bytes > 25000) {
3321 if (packets > 35)
3322 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003323 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003324 retval = low_latency;
3325 }
3326 break;
3327 }
3328
3329update_itr_done:
3330 return retval;
3331}
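/*
 * Editor's worked example of the table above: a queue in low_latency
 * that services 40 packets / 20,000 bytes in an interval has
 * bytes > 10000 and bytes/packets = 500, so neither TSO clause fires,
 * but (packets > 35) does and the queue steps to lowest_latency. Had
 * the interval carried only 2 packets / 400 bytes, the
 * (packets <= 2 && bytes < 512) clause would also select
 * lowest_latency, favouring response time for sparse traffic.
 */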
3332
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003333static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003334{
Alexander Duyck047e0032009-10-27 15:49:27 +00003335 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003336 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003337 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003338
3339 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3340 if (adapter->link_speed != SPEED_1000) {
3341 current_itr = 0;
3342 new_itr = 4000;
3343 goto set_itr_now;
3344 }
3345
3346 adapter->rx_itr = igb_update_itr(adapter,
3347 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003348 q_vector->rx_ring->total_packets,
3349 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003350
Alexander Duyck047e0032009-10-27 15:49:27 +00003351 adapter->tx_itr = igb_update_itr(adapter,
3352 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003353 q_vector->tx_ring->total_packets,
3354 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003355 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003356
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003357 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003358 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003359 current_itr = low_latency;
3360
Auke Kok9d5c8242008-01-24 02:22:38 -08003361 switch (current_itr) {
3362 /* counts and packets in update_itr are dependent on these numbers */
3363 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003364 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003365 break;
3366 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003367 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003368 break;
3369 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003370 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003371 break;
3372 default:
3373 break;
3374 }
3375
3376set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003377 q_vector->rx_ring->total_bytes = 0;
3378 q_vector->rx_ring->total_packets = 0;
3379 q_vector->tx_ring->total_bytes = 0;
3380 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003381
Alexander Duyck047e0032009-10-27 15:49:27 +00003382 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003383 /* this attempts to bias the interrupt rate towards Bulk
3384	 * by adding intermediate steps when the ITR value is
3385	 * increasing (i.e. the interrupt rate is dropping) */
Alexander Duyck047e0032009-10-27 15:49:27 +00003386 new_itr = new_itr > q_vector->itr_val ?
3387 max((new_itr * q_vector->itr_val) /
3388 (new_itr + (q_vector->itr_val >> 2)),
3389 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003390 new_itr;
3391 /* Don't write the value here; it resets the adapter's
3392 * internal timer, and causes us to delay far longer than
3393 * we should between interrupts. Instead, we write the ITR
3394 * value at the beginning of the next interrupt so the timing
3395 * ends up being correct.
3396 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003397 q_vector->itr_val = new_itr;
3398 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003399 }
3400
3401 return;
3402}
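/*
 * Editor's worked check of the biasing expression above, in the
 * integer arithmetic as written: stepping from itr_val = 196 towards
 * new_itr = 980 evaluates max((980 * 196) / (980 + 49), 980) =
 * max(186, 980) = 980, so for these constants the max() resolves to
 * new_itr and bulk is adopted in one step; moves towards a smaller
 * (faster) value take the other branch and apply new_itr directly.
 */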
3403
Auke Kok9d5c8242008-01-24 02:22:38 -08003404#define IGB_TX_FLAGS_CSUM 0x00000001
3405#define IGB_TX_FLAGS_VLAN 0x00000002
3406#define IGB_TX_FLAGS_TSO 0x00000004
3407#define IGB_TX_FLAGS_IPV4 0x00000008
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003408#define IGB_TX_FLAGS_TSTAMP 0x00000010
3409#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3410#define IGB_TX_FLAGS_VLAN_SHIFT 16
Auke Kok9d5c8242008-01-24 02:22:38 -08003411
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003412static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003413 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3414{
3415 struct e1000_adv_tx_context_desc *context_desc;
3416 unsigned int i;
3417 int err;
3418 struct igb_buffer *buffer_info;
3419 u32 info = 0, tu_cmd = 0;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003420 u32 mss_l4len_idx;
3421 u8 l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08003422
3423 if (skb_header_cloned(skb)) {
3424 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3425 if (err)
3426 return err;
3427 }
3428
3429 l4len = tcp_hdrlen(skb);
3430 *hdr_len += l4len;
3431
3432 if (skb->protocol == htons(ETH_P_IP)) {
3433 struct iphdr *iph = ip_hdr(skb);
3434 iph->tot_len = 0;
3435 iph->check = 0;
3436 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3437 iph->daddr, 0,
3438 IPPROTO_TCP,
3439 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003440 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003441 ipv6_hdr(skb)->payload_len = 0;
3442 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3443 &ipv6_hdr(skb)->daddr,
3444 0, IPPROTO_TCP, 0);
3445 }
3446
3447 i = tx_ring->next_to_use;
3448
3449 buffer_info = &tx_ring->buffer_info[i];
3450 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3451 /* VLAN MACLEN IPLEN */
3452 if (tx_flags & IGB_TX_FLAGS_VLAN)
3453 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3454 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3455 *hdr_len += skb_network_offset(skb);
3456 info |= skb_network_header_len(skb);
3457 *hdr_len += skb_network_header_len(skb);
3458 context_desc->vlan_macip_lens = cpu_to_le32(info);
3459
3460 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3461 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3462
3463 if (skb->protocol == htons(ETH_P_IP))
3464 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3465 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3466
3467 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3468
3469 /* MSS L4LEN IDX */
3470 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3471 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3472
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003473 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003474 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3475 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003476
3477 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3478 context_desc->seqnum_seed = 0;
3479
3480 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003481 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003482 buffer_info->dma = 0;
3483 i++;
3484 if (i == tx_ring->count)
3485 i = 0;
3486
3487 tx_ring->next_to_use = i;
3488
3489 return true;
3490}
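/*
 * Editor's worked example of the context-descriptor math above for a
 * plain TCP/IPv4 TSO frame without VLAN (field positions are
 * assumptions from the advanced Tx descriptor layout): MACLEN covers
 * the 14-byte Ethernet header, IPLEN the 20-byte IP header and l4len
 * the 20-byte TCP header, so *hdr_len accumulates 20 + 14 + 20 = 54
 * bytes; with an MSS of 1460, mss_l4len_idx packs 1460 << 16 |
 * 20 << 8, plus the ring's context index when the 82575 workaround
 * flag is set.
 */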
3491
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003492static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3493 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003494{
3495 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck80785292009-10-27 15:51:47 +00003496 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003497 struct igb_buffer *buffer_info;
3498 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003499 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003500
3501 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3502 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3503 i = tx_ring->next_to_use;
3504 buffer_info = &tx_ring->buffer_info[i];
3505 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3506
3507 if (tx_flags & IGB_TX_FLAGS_VLAN)
3508 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003509
Auke Kok9d5c8242008-01-24 02:22:38 -08003510 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3511 if (skb->ip_summed == CHECKSUM_PARTIAL)
3512 info |= skb_network_header_len(skb);
3513
3514 context_desc->vlan_macip_lens = cpu_to_le32(info);
3515
3516 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3517
3518 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003519 __be16 protocol;
3520
3521 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3522 const struct vlan_ethhdr *vhdr =
3523 (const struct vlan_ethhdr*)skb->data;
3524
3525 protocol = vhdr->h_vlan_encapsulated_proto;
3526 } else {
3527 protocol = skb->protocol;
3528 }
3529
3530 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003531 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003532 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003533 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3534 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003535 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3536 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003537 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003538 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003539 /* XXX what about other V6 headers?? */
3540 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3541 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003542 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3543 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003544 break;
3545 default:
3546 if (unlikely(net_ratelimit()))
Alexander Duyck80785292009-10-27 15:51:47 +00003547 dev_warn(&pdev->dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003548 "partial checksum but proto=%x!\n",
3549 skb->protocol);
3550 break;
3551 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003552 }
3553
3554 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3555 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003556 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003557 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003558 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003559
3560 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003561 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003562 buffer_info->dma = 0;
3563
3564 i++;
3565 if (i == tx_ring->count)
3566 i = 0;
3567 tx_ring->next_to_use = i;
3568
3569 return true;
3570 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003571 return false;
3572}
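/*
 * Editor's note: the boolean returned above means "a context
 * descriptor was written", not "checksum offload was requested" - a
 * VLAN-only frame consumes a context descriptor too, which is why the
 * caller additionally checks ip_summed == CHECKSUM_PARTIAL before
 * setting IGB_TX_FLAGS_CSUM.
 */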
3573
3574#define IGB_MAX_TXD_PWR 16
3575#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3576
Alexander Duyck80785292009-10-27 15:51:47 +00003577static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003578 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003579{
3580 struct igb_buffer *buffer_info;
Alexander Duyck80785292009-10-27 15:51:47 +00003581 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003582 unsigned int len = skb_headlen(skb);
3583 unsigned int count = 0, i;
3584 unsigned int f;
3585
3586 i = tx_ring->next_to_use;
3587
3588 buffer_info = &tx_ring->buffer_info[i];
3589 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3590 buffer_info->length = len;
3591 /* set time_stamp *before* dma to help avoid a possible race */
3592 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003593 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003594 buffer_info->dma = pci_map_single(pdev, skb->data, len,
3595 PCI_DMA_TODEVICE);
3596 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3597 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08003598
3599 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3600 struct skb_frag_struct *frag;
3601
Alexander Duyck85811452010-01-23 01:35:00 -08003602 count++;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003603 i++;
3604 if (i == tx_ring->count)
3605 i = 0;
3606
Auke Kok9d5c8242008-01-24 02:22:38 -08003607 frag = &skb_shinfo(skb)->frags[f];
3608 len = frag->size;
3609
3610 buffer_info = &tx_ring->buffer_info[i];
3611 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3612 buffer_info->length = len;
3613 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003614 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003615 buffer_info->mapped_as_page = true;
3616 buffer_info->dma = pci_map_page(pdev,
3617 frag->page,
3618 frag->page_offset,
3619 len,
3620 PCI_DMA_TODEVICE);
3621 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3622 goto dma_error;
3623
Auke Kok9d5c8242008-01-24 02:22:38 -08003624 }
3625
Auke Kok9d5c8242008-01-24 02:22:38 -08003626 tx_ring->buffer_info[i].skb = skb;
Nick Nunley40e90c22010-02-17 01:04:37 +00003627 tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003628 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003629
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003630 return ++count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003631
3632dma_error:
3633 dev_err(&pdev->dev, "TX DMA map failed\n");
3634
3635 /* clear timestamp and dma mappings for failed buffer_info mapping */
3636 buffer_info->dma = 0;
3637 buffer_info->time_stamp = 0;
3638 buffer_info->length = 0;
3639 buffer_info->next_to_watch = 0;
3640 buffer_info->mapped_as_page = false;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003641
3642 /* clear timestamp and dma mappings for remaining portion of packet */
Nick Nunleya77ff702010-02-17 01:06:16 +00003643 while (count--) {
3644 if (i == 0)
3645 i = tx_ring->count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003646 i--;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003647 buffer_info = &tx_ring->buffer_info[i];
3648 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3649 }
3650
3651 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003652}
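/*
 * Editor's note on the dma_error unwind above: count holds the number
 * of buffers mapped before the failure and i still indexes the entry
 * that failed (its fields are cleared separately), so the loop walks
 * backwards - wrapping through tx_ring->count - and releases exactly
 * the mappings created by this call; next_to_use is left for the
 * caller to rewind.
 */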
3653
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003654static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Nick Nunley91d4ee32010-02-17 01:04:56 +00003655 u32 tx_flags, int count, u32 paylen,
Auke Kok9d5c8242008-01-24 02:22:38 -08003656 u8 hdr_len)
3657{
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003658 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003659 struct igb_buffer *buffer_info;
3660 u32 olinfo_status = 0, cmd_type_len;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003661 unsigned int i = tx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08003662
3663 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3664 E1000_ADVTXD_DCMD_DEXT);
3665
3666 if (tx_flags & IGB_TX_FLAGS_VLAN)
3667 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3668
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003669 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3670 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3671
Auke Kok9d5c8242008-01-24 02:22:38 -08003672 if (tx_flags & IGB_TX_FLAGS_TSO) {
3673 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3674
3675 /* insert tcp checksum */
3676 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3677
3678 /* insert ip checksum */
3679 if (tx_flags & IGB_TX_FLAGS_IPV4)
3680 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3681
3682 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3683 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3684 }
3685
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003686 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3687 (tx_flags & (IGB_TX_FLAGS_CSUM |
3688 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003689 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003690 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003691
3692 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3693
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003694 do {
Auke Kok9d5c8242008-01-24 02:22:38 -08003695 buffer_info = &tx_ring->buffer_info[i];
3696 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3697 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3698 tx_desc->read.cmd_type_len =
3699 cpu_to_le32(cmd_type_len | buffer_info->length);
3700 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003701 count--;
Auke Kok9d5c8242008-01-24 02:22:38 -08003702 i++;
3703 if (i == tx_ring->count)
3704 i = 0;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003705 } while (count > 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003706
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003707 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08003708 /* Force memory writes to complete before letting h/w
3709 * know there are new descriptors to fetch. (Only
3710 * applicable for weak-ordered memory model archs,
3711 * such as IA-64). */
3712 wmb();
3713
3714 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00003715 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08003716 /* we need this if more than one processor can write to our tail
3717	 * at a time, it synchronizes IO on IA64/Altix systems */
3718 mmiowb();
3719}
3720
Alexander Duycke694e962009-10-27 15:53:06 +00003721static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003722{
Alexander Duycke694e962009-10-27 15:53:06 +00003723 struct net_device *netdev = tx_ring->netdev;
3724
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003725 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003726
Auke Kok9d5c8242008-01-24 02:22:38 -08003727 /* Herbert's original patch had:
3728 * smp_mb__after_netif_stop_queue();
3729 * but since that doesn't exist yet, just open code it. */
3730 smp_mb();
3731
3732 /* We need to check again in a case another CPU has just
3733 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00003734 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003735 return -EBUSY;
3736
3737 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003738 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00003739 tx_ring->tx_stats.restart_queue++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003740 return 0;
3741}
3742
Nick Nunley717ba0892010-02-17 01:04:18 +00003743static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003744{
Alexander Duyckc493ea42009-03-20 00:16:50 +00003745 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003746 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00003747 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003748}
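/*
 * Editor's note on the stop/wake protocol above: the queue is stopped
 * first and the free-descriptor count is re-checked after the barrier,
 * so a concurrent Tx-clean either observes the stopped queue or the
 * re-check observes the space it just freed. Without that ordering the
 * transmit path could stop a queue that would never be woken.
 */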
3749
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003750netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3751 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003752{
Alexander Duycke694e962009-10-27 15:53:06 +00003753 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003754 int tso = 0, count;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003755 u32 tx_flags = 0;
3756 u16 first;
3757 u8 hdr_len = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00003758 union skb_shared_tx *shtx = skb_tx(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08003759
Auke Kok9d5c8242008-01-24 02:22:38 -08003760 /* need: 1 descriptor per page,
3761 * + 2 desc gap to keep tail from touching head,
3762 * + 1 desc for skb->data,
3763 * + 1 desc for context descriptor,
3764 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00003765 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003766 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08003767 return NETDEV_TX_BUSY;
3768 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003769
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003770 if (unlikely(shtx->hardware)) {
3771 shtx->in_progress = 1;
3772 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003773 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003774
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003775 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003776 tx_flags |= IGB_TX_FLAGS_VLAN;
3777 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3778 }
3779
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003780 if (skb->protocol == htons(ETH_P_IP))
3781 tx_flags |= IGB_TX_FLAGS_IPV4;
3782
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003783 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003784 if (skb_is_gso(skb)) {
3785 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003786
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003787 if (tso < 0) {
3788 dev_kfree_skb_any(skb);
3789 return NETDEV_TX_OK;
3790 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003791 }
3792
3793 if (tso)
3794 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003795 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00003796 (skb->ip_summed == CHECKSUM_PARTIAL))
3797 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003798
Alexander Duyck65689fe2009-03-20 00:17:43 +00003799 /*
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003800	 * count reflects descriptors mapped; if 0 or less, a mapping error
Alexander Duyck65689fe2009-03-20 00:17:43 +00003801	 * has occurred and we need to rewind the descriptor queue
3802 */
Alexander Duyck80785292009-10-27 15:51:47 +00003803 count = igb_tx_map_adv(tx_ring, skb, first);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003804 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00003805 dev_kfree_skb_any(skb);
3806 tx_ring->buffer_info[first].time_stamp = 0;
3807 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003808 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003809 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003810
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003811 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3812
3813 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00003814 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003815
Auke Kok9d5c8242008-01-24 02:22:38 -08003816 return NETDEV_TX_OK;
3817}
3818
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003819static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3820 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003821{
3822 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003823 struct igb_ring *tx_ring;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003824 int r_idx = 0;
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003825
3826 if (test_bit(__IGB_DOWN, &adapter->state)) {
3827 dev_kfree_skb_any(skb);
3828 return NETDEV_TX_OK;
3829 }
3830
3831 if (skb->len <= 0) {
3832 dev_kfree_skb_any(skb);
3833 return NETDEV_TX_OK;
3834 }
3835
Alexander Duyck1bfaf072009-02-19 20:39:23 -08003836 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003837 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08003838
3839 /* This goes back to the question of how to logically map a tx queue
3840 * to a flow. Right now, performance is impacted slightly negatively
3841 * if using multiple tx queues. If the stack breaks away from a
3842 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00003843 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003844}
3845
3846/**
3847 * igb_tx_timeout - Respond to a Tx Hang
3848 * @netdev: network interface device structure
3849 **/
3850static void igb_tx_timeout(struct net_device *netdev)
3851{
3852 struct igb_adapter *adapter = netdev_priv(netdev);
3853 struct e1000_hw *hw = &adapter->hw;
3854
3855 /* Do the reset outside of interrupt context */
3856 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003857
Alexander Duyck55cac242009-11-19 12:42:21 +00003858 if (hw->mac.type == e1000_82580)
3859 hw->dev_spec._82575.global_device_reset = true;
3860
Auke Kok9d5c8242008-01-24 02:22:38 -08003861 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00003862 wr32(E1000_EICS,
3863 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08003864}
3865
3866static void igb_reset_task(struct work_struct *work)
3867{
3868 struct igb_adapter *adapter;
3869 adapter = container_of(work, struct igb_adapter, reset_task);
3870
3871 igb_reinit_locked(adapter);
3872}
3873
3874/**
3875 * igb_get_stats - Get System Network Statistics
3876 * @netdev: network interface device structure
3877 *
3878 * Returns the address of the device statistics structure.
3879 * The statistics are actually updated from the timer callback.
3880 **/
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003881static struct net_device_stats *igb_get_stats(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003882{
Auke Kok9d5c8242008-01-24 02:22:38 -08003883 /* only return the current stats */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003884 return &netdev->stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08003885}
3886
3887/**
3888 * igb_change_mtu - Change the Maximum Transfer Unit
3889 * @netdev: network interface device structure
3890 * @new_mtu: new value for maximum frame size
3891 *
3892 * Returns 0 on success, negative on failure
3893 **/
3894static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3895{
3896 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00003897 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003898 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00003899 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003900
Alexander Duyckc809d222009-10-27 23:52:13 +00003901 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003902 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003903 return -EINVAL;
3904 }
3905
Auke Kok9d5c8242008-01-24 02:22:38 -08003906 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003907 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003908 return -EINVAL;
3909 }
3910
3911 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3912 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003913
Auke Kok9d5c8242008-01-24 02:22:38 -08003914 /* igb_down has a dependency on max_frame_size */
3915 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00003916
Auke Kok9d5c8242008-01-24 02:22:38 -08003917 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3918 * means we reserve 2 more; this pushes us to allocate from the next
3919 * larger slab size.
3920 * i.e. RXBUFFER_2048 --> size-4096 slab
3921 */
3922
Alexander Duyck7d95b712009-10-27 15:50:08 +00003923 if (max_frame <= IGB_RXBUFFER_1024)
Alexander Duyck4c844852009-10-27 15:52:07 +00003924 rx_buffer_len = IGB_RXBUFFER_1024;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003925 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
Alexander Duyck4c844852009-10-27 15:52:07 +00003926 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003927 else
Alexander Duyck4c844852009-10-27 15:52:07 +00003928 rx_buffer_len = IGB_RXBUFFER_128;
3929
3930 if (netif_running(netdev))
3931 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003932
Alexander Duyck090b1792009-10-27 23:51:55 +00003933 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08003934 netdev->mtu, new_mtu);
3935 netdev->mtu = new_mtu;
3936
Alexander Duyck4c844852009-10-27 15:52:07 +00003937 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003938 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
Alexander Duyck4c844852009-10-27 15:52:07 +00003939
Auke Kok9d5c8242008-01-24 02:22:38 -08003940 if (netif_running(netdev))
3941 igb_up(adapter);
3942 else
3943 igb_reset(adapter);
3944
3945 clear_bit(__IGB_RESETTING, &adapter->state);
3946
3947 return 0;
3948}
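
/*
 * Editorial sketch, not part of the driver: the three-way bucketing in
 * igb_change_mtu() above maps a max_frame value onto one of the receive
 * buffer lengths already defined in this file, so that the resulting skb
 * allocation stays within a single slab size. Restated as a hypothetical
 * stand-alone helper (igb_pick_rx_buffer_len is an illustrative name):
 */
static inline u32 igb_pick_rx_buffer_len(int max_frame)
{
	if (max_frame <= IGB_RXBUFFER_1024)
		return IGB_RXBUFFER_1024;	/* whole frame fits in the header buffer */
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		return MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		return IGB_RXBUFFER_128;	/* jumbo: header split, data lands in pages */
}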
3949
3950/**
3951 * igb_update_stats - Update the board statistics counters
3952 * @adapter: board private structure
3953 **/
3954
3955void igb_update_stats(struct igb_adapter *adapter)
3956{
Alexander Duyck128e45e2009-11-12 18:37:38 +00003957 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003958 struct e1000_hw *hw = &adapter->hw;
3959 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00003960 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003961 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003962 int i;
3963 u64 bytes, packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003964
3965#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3966
3967 /*
3968 * Prevent stats update while adapter is being reset, or if the pci
3969 * connection is down.
3970 */
3971 if (adapter->link_speed == 0)
3972 return;
3973 if (pci_channel_offline(pdev))
3974 return;
3975
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003976 bytes = 0;
3977 packets = 0;
3978 for (i = 0; i < adapter->num_rx_queues; i++) {
3979 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00003980 struct igb_ring *ring = adapter->rx_ring[i];
3981 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00003982 net_stats->rx_fifo_errors += rqdpc_tmp;
Alexander Duyck3025a442010-02-17 01:02:39 +00003983 bytes += ring->rx_stats.bytes;
3984 packets += ring->rx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003985 }
3986
Alexander Duyck128e45e2009-11-12 18:37:38 +00003987 net_stats->rx_bytes = bytes;
3988 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003989
3990 bytes = 0;
3991 packets = 0;
3992 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003993 struct igb_ring *ring = adapter->tx_ring[i];
3994 bytes += ring->tx_stats.bytes;
3995 packets += ring->tx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003996 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00003997 net_stats->tx_bytes = bytes;
3998 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003999
4000 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004001 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4002 adapter->stats.gprc += rd32(E1000_GPRC);
4003 adapter->stats.gorc += rd32(E1000_GORCL);
4004 rd32(E1000_GORCH); /* clear GORCL */
4005 adapter->stats.bprc += rd32(E1000_BPRC);
4006 adapter->stats.mprc += rd32(E1000_MPRC);
4007 adapter->stats.roc += rd32(E1000_ROC);
4008
4009 adapter->stats.prc64 += rd32(E1000_PRC64);
4010 adapter->stats.prc127 += rd32(E1000_PRC127);
4011 adapter->stats.prc255 += rd32(E1000_PRC255);
4012 adapter->stats.prc511 += rd32(E1000_PRC511);
4013 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4014 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4015 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4016 adapter->stats.sec += rd32(E1000_SEC);
4017
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004018 mpc = rd32(E1000_MPC);
4019 adapter->stats.mpc += mpc;
4020 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004021 adapter->stats.scc += rd32(E1000_SCC);
4022 adapter->stats.ecol += rd32(E1000_ECOL);
4023 adapter->stats.mcc += rd32(E1000_MCC);
4024 adapter->stats.latecol += rd32(E1000_LATECOL);
4025 adapter->stats.dc += rd32(E1000_DC);
4026 adapter->stats.rlec += rd32(E1000_RLEC);
4027 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4028 adapter->stats.xontxc += rd32(E1000_XONTXC);
4029 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4030 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4031 adapter->stats.fcruc += rd32(E1000_FCRUC);
4032 adapter->stats.gptc += rd32(E1000_GPTC);
4033 adapter->stats.gotc += rd32(E1000_GOTCL);
4034 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004035 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004036 adapter->stats.ruc += rd32(E1000_RUC);
4037 adapter->stats.rfc += rd32(E1000_RFC);
4038 adapter->stats.rjc += rd32(E1000_RJC);
4039 adapter->stats.tor += rd32(E1000_TORH);
4040 adapter->stats.tot += rd32(E1000_TOTH);
4041 adapter->stats.tpr += rd32(E1000_TPR);
4042
4043 adapter->stats.ptc64 += rd32(E1000_PTC64);
4044 adapter->stats.ptc127 += rd32(E1000_PTC127);
4045 adapter->stats.ptc255 += rd32(E1000_PTC255);
4046 adapter->stats.ptc511 += rd32(E1000_PTC511);
4047 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4048 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4049
4050 adapter->stats.mptc += rd32(E1000_MPTC);
4051 adapter->stats.bptc += rd32(E1000_BPTC);
4052
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004053 adapter->stats.tpt += rd32(E1000_TPT);
4054 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004055
4056 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004057 /* read internal phy specific stats */
4058 reg = rd32(E1000_CTRL_EXT);
4059 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4060 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4061 adapter->stats.tncrs += rd32(E1000_TNCRS);
4062 }
4063
Auke Kok9d5c8242008-01-24 02:22:38 -08004064 adapter->stats.tsctc += rd32(E1000_TSCTC);
4065 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4066
4067 adapter->stats.iac += rd32(E1000_IAC);
4068 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4069 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4070 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4071 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4072 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4073 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4074 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4075 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4076
4077 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004078 net_stats->multicast = adapter->stats.mprc;
4079 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004080
4081 /* Rx Errors */
4082
4083 /* RLEC on some newer hardware can be incorrect, so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004084 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004085 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004086 adapter->stats.crcerrs + adapter->stats.algnerrc +
4087 adapter->stats.ruc + adapter->stats.roc +
4088 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004089 net_stats->rx_length_errors = adapter->stats.ruc +
4090 adapter->stats.roc;
4091 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4092 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4093 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004094
4095 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004096 net_stats->tx_errors = adapter->stats.ecol +
4097 adapter->stats.latecol;
4098 net_stats->tx_aborted_errors = adapter->stats.ecol;
4099 net_stats->tx_window_errors = adapter->stats.latecol;
4100 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004101
4102 /* Tx Dropped needs to be maintained elsewhere */
4103
4104 /* Phy Stats */
4105 if (hw->phy.media_type == e1000_media_type_copper) {
4106 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004107 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004108 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4109 adapter->phy_stats.idle_errors += phy_tmp;
4110 }
4111 }
4112
4113 /* Management Stats */
4114 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4115 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4116 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4117}
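
/*
 * Editorial sketch: several counters read above (GORC/GOTC, TOR/TOT) are
 * 64-bit values split across a low/high register pair, and reading the
 * high half also clears the latched pair - hence the bare rd32() calls
 * whose results are discarded. A hedged helper making the read order
 * explicit (igb_read_stat64 is an illustrative name, not a driver API):
 */
static inline u64 igb_read_stat64(struct e1000_hw *hw, u32 low_reg, u32 high_reg)
{
	u64 val = rd32(low_reg);		/* latch and fetch the low 32 bits */

	val |= (u64)rd32(high_reg) << 32;	/* high read clears the pair */
	return val;
}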
4118
Auke Kok9d5c8242008-01-24 02:22:38 -08004119static irqreturn_t igb_msix_other(int irq, void *data)
4120{
Alexander Duyck047e0032009-10-27 15:49:27 +00004121 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004122 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004123 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004124 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004125
Alexander Duyck7f081d42010-01-07 17:41:00 +00004126 if (icr & E1000_ICR_DRSTA)
4127 schedule_work(&adapter->reset_task);
4128
Alexander Duyck047e0032009-10-27 15:49:27 +00004129 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004130 /* HW is reporting DMA is out of sync */
4131 adapter->stats.doosync++;
4132 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004133
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004134 /* Check for a mailbox event */
4135 if (icr & E1000_ICR_VMMB)
4136 igb_msg_task(adapter);
4137
4138 if (icr & E1000_ICR_LSC) {
4139 hw->mac.get_link_status = 1;
4140 /* guard against interrupt when we're going down */
4141 if (!test_bit(__IGB_DOWN, &adapter->state))
4142 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4143 }
4144
Alexander Duyck25568a52009-10-27 23:49:59 +00004145 if (adapter->vfs_allocated_count)
4146 wr32(E1000_IMS, E1000_IMS_LSC |
4147 E1000_IMS_VMMB |
4148 E1000_IMS_DOUTSYNC);
4149 else
4150 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004151 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004152
4153 return IRQ_HANDLED;
4154}
4155
Alexander Duyck047e0032009-10-27 15:49:27 +00004156static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004157{
Alexander Duyck26b39272010-02-17 01:00:41 +00004158 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004159 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004160
Alexander Duyck047e0032009-10-27 15:49:27 +00004161 if (!q_vector->set_itr)
4162 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004163
Alexander Duyck047e0032009-10-27 15:49:27 +00004164 if (!itr_val)
4165 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004166
Alexander Duyck26b39272010-02-17 01:00:41 +00004167 if (adapter->hw.mac.type == e1000_82575)
4168 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004169 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004170 itr_val |= 0x8000000;
4171
4172 writel(itr_val, q_vector->itr_register);
4173 q_vector->set_itr = 0;
4174}
4175
4176static irqreturn_t igb_msix_ring(int irq, void *data)
4177{
4178 struct igb_q_vector *q_vector = data;
4179
4180 /* Write the ITR value calculated from the previous interrupt. */
4181 igb_write_itr(q_vector);
4182
4183 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004184
Auke Kok9d5c8242008-01-24 02:22:38 -08004185 return IRQ_HANDLED;
4186}
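
/*
 * Editorial note: igb_msix_ring() recovers its igb_q_vector from the
 * cookie supplied when the vector was requested, so the registration
 * done earlier in this file follows the usual request_irq() pattern.
 * The vector handle and name below are purely illustrative:
 *
 *	err = request_irq(msix_entry->vector, igb_msix_ring, 0,
 *			  "igb-TxRx-0", q_vector);
 */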
4187
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004188#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004189static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004190{
Alexander Duyck047e0032009-10-27 15:49:27 +00004191 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004192 struct e1000_hw *hw = &adapter->hw;
4193 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004194
Alexander Duyck047e0032009-10-27 15:49:27 +00004195 if (q_vector->cpu == cpu)
4196 goto out_no_update;
4197
4198 if (q_vector->tx_ring) {
4199 int q = q_vector->tx_ring->reg_idx;
4200 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4201 if (hw->mac.type == e1000_82575) {
4202 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4203 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4204 } else {
4205 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4206 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4207 E1000_DCA_TXCTRL_CPUID_SHIFT;
4208 }
4209 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4210 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4211 }
4212 if (q_vector->rx_ring) {
4213 int q = q_vector->rx_ring->reg_idx;
4214 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4215 if (hw->mac.type == e1000_82575) {
4216 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4217 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4218 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004219 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004220 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004221 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004222 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004223 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4224 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4225 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4226 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004227 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004228 q_vector->cpu = cpu;
4229out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004230 put_cpu();
4231}
4232
4233static void igb_setup_dca(struct igb_adapter *adapter)
4234{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004235 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004236 int i;
4237
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004238 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004239 return;
4240
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004241 /* Always use CB2 mode, difference is masked in the CB driver. */
4242 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4243
Alexander Duyck047e0032009-10-27 15:49:27 +00004244 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004245 adapter->q_vector[i]->cpu = -1;
4246 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004247 }
4248}
4249
4250static int __igb_notify_dca(struct device *dev, void *data)
4251{
4252 struct net_device *netdev = dev_get_drvdata(dev);
4253 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004254 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004255 struct e1000_hw *hw = &adapter->hw;
4256 unsigned long event = *(unsigned long *)data;
4257
4258 switch (event) {
4259 case DCA_PROVIDER_ADD:
4260 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004261 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004262 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004263 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004264 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004265 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004266 igb_setup_dca(adapter);
4267 break;
4268 }
4269 /* Fall Through since DCA is disabled. */
4270 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004271 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004272 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004273 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004274 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004275 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004276 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004277 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004278 }
4279 break;
4280 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004281
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004282 return 0;
4283}
4284
4285static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4286 void *p)
4287{
4288 int ret_val;
4289
4290 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4291 __igb_notify_dca);
4292
4293 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4294}
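
/*
 * Editorial sketch: igb_notify_dca() is designed to be called back through
 * a struct notifier_block registered with the DCA core at module init time
 * (and unregistered on exit). A minimal wiring of that kind, with an
 * illustrative variable name, would be:
 */
static struct notifier_block igb_dca_notifier_sketch = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0,
};

/* at module load:   dca_register_notify(&igb_dca_notifier_sketch);
 * at module unload: dca_unregister_notify(&igb_dca_notifier_sketch);
 */
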
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004295#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004296
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004297static void igb_ping_all_vfs(struct igb_adapter *adapter)
4298{
4299 struct e1000_hw *hw = &adapter->hw;
4300 u32 ping;
4301 int i;
4302
4303 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4304 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004305 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004306 ping |= E1000_VT_MSGTYPE_CTS;
4307 igb_write_mbx(hw, &ping, 1, i);
4308 }
4309}
4310
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004311static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4312{
4313 struct e1000_hw *hw = &adapter->hw;
4314 u32 vmolr = rd32(E1000_VMOLR(vf));
4315 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4316
4317 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4318 IGB_VF_FLAG_MULTI_PROMISC);
4319 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4320
4321 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4322 vmolr |= E1000_VMOLR_MPME;
vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
4323 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4324 } else {
4325 /*
4326 * if we have hashes and we are clearing a multicast promisc
4327 * flag we need to write the hashes to the MTA as this step
4328 * was previously skipped
4329 */
4330 if (vf_data->num_vf_mc_hashes > 30) {
4331 vmolr |= E1000_VMOLR_MPME;
4332 } else if (vf_data->num_vf_mc_hashes) {
4333 int j;
4334 vmolr |= E1000_VMOLR_ROMPE;
4335 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4336 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4337 }
4338 }
4339
4340 wr32(E1000_VMOLR(vf), vmolr);
4341
4342 /* there are flags left unprocessed, likely not supported */
4343 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4344 return -EINVAL;
4345
4346 return 0;
4347
4348}
4349
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004350static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4351 u32 *msgbuf, u32 vf)
4352{
4353 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4354 u16 *hash_list = (u16 *)&msgbuf[1];
4355 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4356 int i;
4357
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004358 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004359 * to this VF for later use to restore when the PF multicast
4360 * list changes
4361 */
4362 vf_data->num_vf_mc_hashes = n;
4363
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004364 /* only up to 30 hash values supported */
4365 if (n > 30)
4366 n = 30;
4367
4368 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004369 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07004370 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004371
4372 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004373 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004374
4375 return 0;
4376}
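
/*
 * Editorial sketch: the mailbox message parsed above packs the hash count
 * into the MSGINFO field of word 0 and the 16-bit hash values starting at
 * word 1. A hypothetical VF-side encoder for that same layout (not part
 * of this driver) could read:
 */
static inline void igb_sketch_pack_mc_msg(u32 *msgbuf, const u16 *hashes, int n)
{
	int i;

	msgbuf[0] = E1000_VF_SET_MULTICAST | (n << E1000_VT_MSGINFO_SHIFT);
	for (i = 0; i < n; i++)
		((u16 *)&msgbuf[1])[i] = hashes[i];
}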
4377
4378static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4379{
4380 struct e1000_hw *hw = &adapter->hw;
4381 struct vf_data_storage *vf_data;
4382 int i, j;
4383
4384 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004385 u32 vmolr = rd32(E1000_VMOLR(i));
4386 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4387
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004388 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004389
4390 if ((vf_data->num_vf_mc_hashes > 30) ||
4391 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4392 vmolr |= E1000_VMOLR_MPME;
4393 } else if (vf_data->num_vf_mc_hashes) {
4394 vmolr |= E1000_VMOLR_ROMPE;
4395 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4396 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4397 }
4398 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004399 }
4400}
4401
4402static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4403{
4404 struct e1000_hw *hw = &adapter->hw;
4405 u32 pool_mask, reg, vid;
4406 int i;
4407
4408 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4409
4410 /* Find the vlan filter for this id */
4411 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4412 reg = rd32(E1000_VLVF(i));
4413
4414 /* remove the vf from the pool */
4415 reg &= ~pool_mask;
4416
4417 /* if pool is empty then remove entry from vfta */
4418 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4419 (reg & E1000_VLVF_VLANID_ENABLE)) {
4420 reg = 0;
4421 vid = reg & E1000_VLVF_VLANID_MASK;
4422 igb_vfta_set(hw, vid, false);
4423 }
4424
4425 wr32(E1000_VLVF(i), reg);
4426 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004427
4428 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004429}
4430
4431static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4432{
4433 struct e1000_hw *hw = &adapter->hw;
4434 u32 reg, i;
4435
Alexander Duyck51466232009-10-27 23:47:35 +00004436 /* The vlvf table only exists on 82576 hardware and newer */
4437 if (hw->mac.type < e1000_82576)
4438 return -1;
4439
4440 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004441 if (!adapter->vfs_allocated_count)
4442 return -1;
4443
4444 /* Find the vlan filter for this id */
4445 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4446 reg = rd32(E1000_VLVF(i));
4447 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4448 vid == (reg & E1000_VLVF_VLANID_MASK))
4449 break;
4450 }
4451
4452 if (add) {
4453 if (i == E1000_VLVF_ARRAY_SIZE) {
4454 /* Did not find a matching VLAN ID entry that was
4455 * enabled. Search for a free filter entry, i.e.
4456 * one without the enable bit set
4457 */
4458 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4459 reg = rd32(E1000_VLVF(i));
4460 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4461 break;
4462 }
4463 }
4464 if (i < E1000_VLVF_ARRAY_SIZE) {
4465 /* Found an enabled/available entry */
4466 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4467
4468 /* if !enabled we need to set this up in vfta */
4469 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00004470 /* add VID to filter table */
4471 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004472 reg |= E1000_VLVF_VLANID_ENABLE;
4473 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00004474 reg &= ~E1000_VLVF_VLANID_MASK;
4475 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004476 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004477
4478 /* do not modify RLPML for PF devices */
4479 if (vf >= adapter->vfs_allocated_count)
4480 return 0;
4481
4482 if (!adapter->vf_data[vf].vlans_enabled) {
4483 u32 size;
4484 reg = rd32(E1000_VMOLR(vf));
4485 size = reg & E1000_VMOLR_RLPML_MASK;
4486 size += 4;
4487 reg &= ~E1000_VMOLR_RLPML_MASK;
4488 reg |= size;
4489 wr32(E1000_VMOLR(vf), reg);
4490 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004491
Alexander Duyck51466232009-10-27 23:47:35 +00004492 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004493 return 0;
4494 }
4495 } else {
4496 if (i < E1000_VLVF_ARRAY_SIZE) {
4497 /* remove vf from the pool */
4498 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4499 /* if pool is empty then remove entry from vfta */
4500 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4501 reg = 0;
4502 igb_vfta_set(hw, vid, false);
4503 }
4504 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004505
4506 /* do not modify RLPML for PF devices */
4507 if (vf >= adapter->vfs_allocated_count)
4508 return 0;
4509
4510 adapter->vf_data[vf].vlans_enabled--;
4511 if (!adapter->vf_data[vf].vlans_enabled) {
4512 u32 size;
4513 reg = rd32(E1000_VMOLR(vf));
4514 size = reg & E1000_VMOLR_RLPML_MASK;
4515 size -= 4;
4516 reg &= ~E1000_VMOLR_RLPML_MASK;
4517 reg |= size;
4518 wr32(E1000_VMOLR(vf), reg);
4519 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004520 }
4521 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00004522 return 0;
4523}
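
/*
 * Editorial sketch: each VLVF entry holds a 12-bit VLAN id, an enable bit,
 * and a pool-select bitmap with one bit per pool (VF), offset by
 * E1000_VLVF_POOLSEL_SHIFT. The membership test that the search loops
 * above depend on can be restated as a hypothetical predicate:
 */
static inline bool igb_sketch_vlvf_has_vf(u32 reg, u32 vf)
{
	return (reg & E1000_VLVF_VLANID_ENABLE) &&
	       (reg & (1 << (E1000_VLVF_POOLSEL_SHIFT + vf)));
}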
4524
4525static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4526{
4527 struct e1000_hw *hw = &adapter->hw;
4528
4529 if (vid)
4530 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
4531 else
4532 wr32(E1000_VMVIR(vf), 0);
4533}
4534
4535static int igb_ndo_set_vf_vlan(struct net_device *netdev,
4536 int vf, u16 vlan, u8 qos)
4537{
4538 int err = 0;
4539 struct igb_adapter *adapter = netdev_priv(netdev);
4540
4541 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
4542 return -EINVAL;
4543 if (vlan || qos) {
4544 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
4545 if (err)
4546 goto out;
4547 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
4548 igb_set_vmolr(adapter, vf, !vlan);
4549 adapter->vf_data[vf].pf_vlan = vlan;
4550 adapter->vf_data[vf].pf_qos = qos;
4551 dev_info(&adapter->pdev->dev,
4552 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
4553 if (test_bit(__IGB_DOWN, &adapter->state)) {
4554 dev_warn(&adapter->pdev->dev,
4555 "The VF VLAN has been set,"
4556 " but the PF device is not up.\n");
4557 dev_warn(&adapter->pdev->dev,
4558 "Bring the PF device up before"
4559 " attempting to use the VF device.\n");
4560 }
4561 } else {
4562 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
4563 false, vf);
4564 igb_set_vmvir(adapter, vlan, vf);
4565 igb_set_vmolr(adapter, vf, true);
4566 adapter->vf_data[vf].pf_vlan = 0;
4567 adapter->vf_data[vf].pf_qos = 0;
4568 }
4569out:
4570 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004571}
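
/*
 * Editorial usage note: igb_ndo_set_vf_vlan() is reached through the
 * rtnetlink ndo_set_vf_vlan hook, so with a sufficiently recent iproute2
 * the PF administrator can drive it from userspace along these lines
 * (device and VF number are illustrative):
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *
 * Passing vlan 0 takes the else-branch above and removes the port VLAN.
 */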
4572
4573static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4574{
4575 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4576 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4577
4578 return igb_vlvf_set(adapter, vid, add, vf);
4579}
4580
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004581static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004582{
Williams, Mitch A8151d292010-02-10 01:44:24 +00004583 /* clear flags - except flag that indicates PF has set the MAC */
4584 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004585 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004586
4587 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004588 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004589
4590 /* reset vlans for device */
4591 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00004592 if (adapter->vf_data[vf].pf_vlan)
4593 igb_ndo_set_vf_vlan(adapter->netdev, vf,
4594 adapter->vf_data[vf].pf_vlan,
4595 adapter->vf_data[vf].pf_qos);
4596 else
4597 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004598
4599 /* reset multicast table array for vf */
4600 adapter->vf_data[vf].num_vf_mc_hashes = 0;
4601
4602 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004603 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004604}
4605
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004606static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4607{
4608 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4609
4610 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004611 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
4612 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004613
4614 /* process remaining reset events */
4615 igb_vf_reset(adapter, vf);
4616}
4617
4618static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004619{
4620 struct e1000_hw *hw = &adapter->hw;
4621 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004622 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004623 u32 reg, msgbuf[3];
4624 u8 *addr = (u8 *)(&msgbuf[1]);
4625
4626 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004627 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004628
4629 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00004630 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004631
4632 /* enable transmit and receive for vf */
4633 reg = rd32(E1000_VFTE);
4634 wr32(E1000_VFTE, reg | (1 << vf));
4635 reg = rd32(E1000_VFRE);
4636 wr32(E1000_VFRE, reg | (1 << vf));
4637
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004638 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004639
4640 /* reply to reset with ack and vf mac address */
4641 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4642 memcpy(addr, vf_mac, 6);
4643 igb_write_mbx(hw, msgbuf, 3, vf);
4644}
4645
4646static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4647{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004648 unsigned char *addr = (unsigned char *)&msg[1];
4649 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004650
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004651 if (is_valid_ether_addr(addr))
4652 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004653
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004654 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004655}
4656
4657static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4658{
4659 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004660 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004661 u32 msg = E1000_VT_MSGTYPE_NACK;
4662
4663 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004664 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4665 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004666 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004667 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004668 }
4669}
4670
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004671static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004672{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004673 struct pci_dev *pdev = adapter->pdev;
4674 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004675 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004676 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004677 s32 retval;
4678
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004679 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004680
Alexander Duyckfef45f42009-12-11 22:57:34 -08004681 if (retval) {
4682 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004683 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08004684 vf_data->flags &= ~IGB_VF_FLAG_CTS;
4685 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4686 return;
4687 goto out;
4688 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004689
4690 /* this is a message we already processed, do nothing */
4691 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004692 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004693
4694 /*
4695 * until the vf completes a reset it should not be
4696 * allowed to start any configuration.
4697 */
4698
4699 if (msgbuf[0] == E1000_VF_RESET) {
4700 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004701 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004702 }
4703
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004704 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08004705 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4706 return;
4707 retval = -1;
4708 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004709 }
4710
4711 switch ((msgbuf[0] & 0xFFFF)) {
4712 case E1000_VF_SET_MAC_ADDR:
4713 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4714 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004715 case E1000_VF_SET_PROMISC:
4716 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4717 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004718 case E1000_VF_SET_MULTICAST:
4719 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4720 break;
4721 case E1000_VF_SET_LPE:
4722 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4723 break;
4724 case E1000_VF_SET_VLAN:
Williams, Mitch A8151d292010-02-10 01:44:24 +00004725 if (adapter->vf_data[vf].pf_vlan)
4726 retval = -1;
4727 else
4728 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004729 break;
4730 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00004731 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004732 retval = -1;
4733 break;
4734 }
4735
Alexander Duyckfef45f42009-12-11 22:57:34 -08004736 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4737out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004738 /* notify the VF of the results of what it sent us */
4739 if (retval)
4740 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4741 else
4742 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4743
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004744 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004745}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004746
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004747static void igb_msg_task(struct igb_adapter *adapter)
4748{
4749 struct e1000_hw *hw = &adapter->hw;
4750 u32 vf;
4751
4752 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4753 /* process any reset requests */
4754 if (!igb_check_for_rst(hw, vf))
4755 igb_vf_reset_event(adapter, vf);
4756
4757 /* process any messages pending */
4758 if (!igb_check_for_msg(hw, vf))
4759 igb_rcv_msg_from_vf(adapter, vf);
4760
4761 /* process any acks */
4762 if (!igb_check_for_ack(hw, vf))
4763 igb_rcv_ack_from_vf(adapter, vf);
4764 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004765}
4766
Auke Kok9d5c8242008-01-24 02:22:38 -08004767/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00004768 * igb_set_uta - Set unicast filter table address
4769 * @adapter: board private structure
4770 *
4771 * The unicast table address is a register array of 32-bit registers.
4772 * The table is meant to be used in a way similar to how the MTA is used;
4773 * however, due to certain limitations in the hardware it is necessary to
4774 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
4775 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
4776 **/
4777static void igb_set_uta(struct igb_adapter *adapter)
4778{
4779 struct e1000_hw *hw = &adapter->hw;
4780 int i;
4781
4782 /* The UTA table only exists on 82576 hardware and newer */
4783 if (hw->mac.type < e1000_82576)
4784 return;
4785
4786 /* we only need to do this if VMDq is enabled */
4787 if (!adapter->vfs_allocated_count)
4788 return;
4789
4790 for (i = 0; i < hw->mac.uta_reg_count; i++)
4791 array_wr32(E1000_UTA, i, ~0);
4792}
4793
4794/**
Auke Kok9d5c8242008-01-24 02:22:38 -08004795 * igb_intr_msi - Interrupt Handler
4796 * @irq: interrupt number
4797 * @data: pointer to a network interface device structure
4798 **/
4799static irqreturn_t igb_intr_msi(int irq, void *data)
4800{
Alexander Duyck047e0032009-10-27 15:49:27 +00004801 struct igb_adapter *adapter = data;
4802 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004803 struct e1000_hw *hw = &adapter->hw;
4804 /* read ICR disables interrupts using IAM */
4805 u32 icr = rd32(E1000_ICR);
4806
Alexander Duyck047e0032009-10-27 15:49:27 +00004807 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004808
Alexander Duyck7f081d42010-01-07 17:41:00 +00004809 if (icr & E1000_ICR_DRSTA)
4810 schedule_work(&adapter->reset_task);
4811
Alexander Duyck047e0032009-10-27 15:49:27 +00004812 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004813 /* HW is reporting DMA is out of sync */
4814 adapter->stats.doosync++;
4815 }
4816
Auke Kok9d5c8242008-01-24 02:22:38 -08004817 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4818 hw->mac.get_link_status = 1;
4819 if (!test_bit(__IGB_DOWN, &adapter->state))
4820 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4821 }
4822
Alexander Duyck047e0032009-10-27 15:49:27 +00004823 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004824
4825 return IRQ_HANDLED;
4826}
4827
4828/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00004829 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08004830 * @irq: interrupt number
4831 * @data: pointer to a network interface device structure
4832 **/
4833static irqreturn_t igb_intr(int irq, void *data)
4834{
Alexander Duyck047e0032009-10-27 15:49:27 +00004835 struct igb_adapter *adapter = data;
4836 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004837 struct e1000_hw *hw = &adapter->hw;
4838 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4839 * need for the IMC write */
4840 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08004841 if (!icr)
4842 return IRQ_NONE; /* Not our interrupt */
4843
Alexander Duyck047e0032009-10-27 15:49:27 +00004844 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004845
4846 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4847 * not set, then the adapter didn't send an interrupt */
4848 if (!(icr & E1000_ICR_INT_ASSERTED))
4849 return IRQ_NONE;
4850
Alexander Duyck7f081d42010-01-07 17:41:00 +00004851 if (icr & E1000_ICR_DRSTA)
4852 schedule_work(&adapter->reset_task);
4853
Alexander Duyck047e0032009-10-27 15:49:27 +00004854 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004855 /* HW is reporting DMA is out of sync */
4856 adapter->stats.doosync++;
4857 }
4858
Auke Kok9d5c8242008-01-24 02:22:38 -08004859 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4860 hw->mac.get_link_status = 1;
4861 /* guard against interrupt when we're going down */
4862 if (!test_bit(__IGB_DOWN, &adapter->state))
4863 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4864 }
4865
Alexander Duyck047e0032009-10-27 15:49:27 +00004866 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004867
4868 return IRQ_HANDLED;
4869}
4870
Alexander Duyck047e0032009-10-27 15:49:27 +00004871static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08004872{
Alexander Duyck047e0032009-10-27 15:49:27 +00004873 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08004874 struct e1000_hw *hw = &adapter->hw;
4875
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00004876 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4877 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
Alexander Duyck047e0032009-10-27 15:49:27 +00004878 if (!adapter->msix_entries)
Alexander Duyck46544252009-02-19 20:39:04 -08004879 igb_set_itr(adapter);
4880 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004881 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004882 }
4883
4884 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4885 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00004886 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08004887 else
4888 igb_irq_enable(adapter);
4889 }
4890}
4891
Auke Kok9d5c8242008-01-24 02:22:38 -08004892/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004893 * igb_poll - NAPI Rx polling callback
4894 * @napi: napi polling structure
4895 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08004896 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004897static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08004898{
Alexander Duyck047e0032009-10-27 15:49:27 +00004899 struct igb_q_vector *q_vector = container_of(napi,
4900 struct igb_q_vector,
4901 napi);
4902 int tx_clean_complete = 1, work_done = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004903
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004904#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004905 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4906 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004907#endif
Alexander Duyck047e0032009-10-27 15:49:27 +00004908 if (q_vector->tx_ring)
4909 tx_clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004910
Alexander Duyck047e0032009-10-27 15:49:27 +00004911 if (q_vector->rx_ring)
4912 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4913
4914 if (!tx_clean_complete)
4915 work_done = budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08004916
Alexander Duyck46544252009-02-19 20:39:04 -08004917 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck5e6d5b12009-03-13 20:40:38 +00004918 if (work_done < budget) {
Alexander Duyck46544252009-02-19 20:39:04 -08004919 napi_complete(napi);
Alexander Duyck047e0032009-10-27 15:49:27 +00004920 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004921 }
4922
4923 return work_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08004924}
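
/*
 * Editorial note: igb_poll() is attached to each q_vector's NAPI context
 * when the vectors are allocated, using the standard registration call
 * (64 is the conventional NAPI weight; the actual call site lives earlier
 * in this file):
 *
 *	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
 */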
Al Viro6d8126f2008-03-16 22:23:24 +00004925
Auke Kok9d5c8242008-01-24 02:22:38 -08004926/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004927 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004928 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004929 * @shhwtstamps: timestamp structure to update
4930 * @regval: unsigned 64bit system time value.
4931 *
4932 * We need to convert the system time value stored in the RX/TXSTMP registers
4933 * into a hwtstamp which can be used by the upper level timestamping functions
4934 */
4935static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4936 struct skb_shared_hwtstamps *shhwtstamps,
4937 u64 regval)
4938{
4939 u64 ns;
4940
Alexander Duyck55cac242009-11-19 12:42:21 +00004941 /*
4942 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
4943 * 24 to match clock shift we setup earlier.
4944 */
4945 if (adapter->hw.mac.type == e1000_82580)
4946 regval <<= IGB_82580_TSYNC_SHIFT;
4947
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004948 ns = timecounter_cyc2time(&adapter->clock, regval);
4949 timecompare_update(&adapter->compare, ns);
4950 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4951 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4952 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4953}
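
/*
 * Editorial worked example for the 82580 adjustment above, assuming the
 * cyclecounter was registered with a unit multiplier and a shift of 24 as
 * the comment states: the timecounter converts via
 * ns = (cycles * mult) >> shift, so a raw RX/TXSTMP reading of 5
 * (i.e. 5 ns) must be fed to timecounter_cyc2time() as 5 << 24 in order
 * to come back out as 5 ns.
 */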
4954
4955/**
4956 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4957 * @q_vector: pointer to q_vector containing needed info
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004958 * @skb: packet that was just sent
4959 *
4960 * If we were asked to do hardware stamping and such a time stamp is
4961 * available, then it must have been for this skb here because we
4962 * allow only one such packet into the queue.
4963 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004964static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004965{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004966 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004967 union skb_shared_tx *shtx = skb_tx(skb);
4968 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004969 struct skb_shared_hwtstamps shhwtstamps;
4970 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004971
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004972 /* if skb does not support hw timestamp or TX stamp not valid exit */
4973 if (likely(!shtx->hardware) ||
4974 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4975 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004976
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004977 regval = rd32(E1000_TXSTMPL);
4978 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4979
4980 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4981 skb_tstamp_tx(skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004982}
4983
4984/**
Auke Kok9d5c8242008-01-24 02:22:38 -08004985 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00004986 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08004987 * returns true if ring is completely cleaned
4988 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00004989static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004990{
Alexander Duyck047e0032009-10-27 15:49:27 +00004991 struct igb_adapter *adapter = q_vector->adapter;
4992 struct igb_ring *tx_ring = q_vector->tx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00004993 struct net_device *netdev = tx_ring->netdev;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004994 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08004995 struct igb_buffer *buffer_info;
4996 struct sk_buff *skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004997 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004998 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004999 unsigned int i, eop, count = 0;
5000 bool cleaned = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08005001
Auke Kok9d5c8242008-01-24 02:22:38 -08005002 i = tx_ring->next_to_clean;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005003 eop = tx_ring->buffer_info[i].next_to_watch;
5004 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5005
5006 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
5007 (count < tx_ring->count)) {
5008 for (cleaned = false; !cleaned; count++) {
5009 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005010 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005011 cleaned = (i == eop);
Auke Kok9d5c8242008-01-24 02:22:38 -08005012 skb = buffer_info->skb;
5013
5014 if (skb) {
5015 unsigned int segs, bytecount;
5016 /* gso_segs is currently only valid for tcp */
Nick Nunley40e90c22010-02-17 01:04:37 +00005017 segs = buffer_info->gso_segs;
Auke Kok9d5c8242008-01-24 02:22:38 -08005018 /* multiply data chunks by size of headers */
5019 bytecount = ((segs - 1) * skb_headlen(skb)) +
5020 skb->len;
5021 total_packets += segs;
5022 total_bytes += bytecount;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005023
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005024 igb_tx_hwtstamp(q_vector, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005025 }
5026
Alexander Duyck80785292009-10-27 15:51:47 +00005027 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005028 tx_desc->wb.status = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005029
5030 i++;
5031 if (i == tx_ring->count)
5032 i = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005033 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005034 eop = tx_ring->buffer_info[i].next_to_watch;
5035 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5036 }
5037
Auke Kok9d5c8242008-01-24 02:22:38 -08005038 tx_ring->next_to_clean = i;
5039
Alexander Duyckfc7d3452008-08-26 04:25:08 -07005040 if (unlikely(count &&
Auke Kok9d5c8242008-01-24 02:22:38 -08005041 netif_carrier_ok(netdev) &&
Alexander Duyckc493ea42009-03-20 00:16:50 +00005042 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005043 /* Make sure that anybody stopping the queue after this
5044 * sees the new next_to_clean.
5045 */
5046 smp_mb();
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005047 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5048 !(test_bit(__IGB_DOWN, &adapter->state))) {
5049 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005050 tx_ring->tx_stats.restart_queue++;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005051 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005052 }
5053
5054 if (tx_ring->detect_tx_hung) {
5055 /* Detect a transmit hang in hardware; this serializes the
5056 * check with the clearing of time_stamp and movement of i */
5057 tx_ring->detect_tx_hung = false;
5058 if (tx_ring->buffer_info[i].time_stamp &&
5059 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005060 (adapter->tx_timeout_factor * HZ)) &&
5061 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005062
Auke Kok9d5c8242008-01-24 02:22:38 -08005063 /* detected Tx unit hang */
Alexander Duyck80785292009-10-27 15:51:47 +00005064 dev_err(&tx_ring->pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005065 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005066 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005067 " TDH <%x>\n"
5068 " TDT <%x>\n"
5069 " next_to_use <%x>\n"
5070 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005071 "buffer_info[next_to_clean]\n"
5072 " time_stamp <%lx>\n"
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005073 " next_to_watch <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005074 " jiffies <%lx>\n"
5075 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005076 tx_ring->queue_index,
Alexander Duyckfce99e32009-10-27 15:51:27 +00005077 readl(tx_ring->head),
5078 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005079 tx_ring->next_to_use,
5080 tx_ring->next_to_clean,
Alexander Duyckf7ba2052009-10-27 23:48:51 +00005081 tx_ring->buffer_info[eop].time_stamp,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005082 eop,
Auke Kok9d5c8242008-01-24 02:22:38 -08005083 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005084 eop_desc->wb.status);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005085 netif_stop_subqueue(netdev, tx_ring->queue_index);
Auke Kok9d5c8242008-01-24 02:22:38 -08005086 }
5087 }
5088 tx_ring->total_bytes += total_bytes;
5089 tx_ring->total_packets += total_packets;
Alexander Duycke21ed352008-07-08 15:07:24 -07005090 tx_ring->tx_stats.bytes += total_bytes;
5091 tx_ring->tx_stats.packets += total_packets;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005092 return (count < tx_ring->count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005093}
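
/*
 * Editorial worked example for the TSO byte accounting above, assuming
 * the linear area (skb_headlen) holds exactly the protocol headers: for a
 * TSO skb carrying 44 segments of 1460 bytes payload behind a 54-byte
 * header, skb->len = 54 + 44 * 1460 = 64294 counts the header once, so
 *	bytecount = (44 - 1) * 54 + 64294 = 66616
 * which matches the on-wire total of 44 packets of 1514 bytes each.
 */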
5094
Auke Kok9d5c8242008-01-24 02:22:38 -08005095/**
5096 * igb_receive_skb - helper function to handle rx indications
Alexander Duyck047e0032009-10-27 15:49:27 +00005097 * @q_vector: structure containing interrupt and ring information
5098 * @skb: packet to send up
5099 * @vlan_tag: vlan tag for packet
Auke Kok9d5c8242008-01-24 02:22:38 -08005100 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005101static void igb_receive_skb(struct igb_q_vector *q_vector,
5102 struct sk_buff *skb,
5103 u16 vlan_tag)
Auke Kok9d5c8242008-01-24 02:22:38 -08005104{
Alexander Duyck047e0032009-10-27 15:49:27 +00005105 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyckd3352522008-07-08 15:12:13 -07005106
Alexander Duyck31b24b92010-03-23 18:35:18 +00005107 if (vlan_tag && adapter->vlgrp)
Alexander Duyck047e0032009-10-27 15:49:27 +00005108 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5109 vlan_tag, skb);
Alexander Duyck182ff8d2009-04-27 22:35:33 +00005110 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005111 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005112}
5113
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005114static inline void igb_rx_checksum_adv(struct igb_ring *ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08005115 u32 status_err, struct sk_buff *skb)
5116{
5117 skb->ip_summed = CHECKSUM_NONE;
5118
5119 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005120 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5121 (status_err & E1000_RXD_STAT_IXSM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005122 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005123
Auke Kok9d5c8242008-01-24 02:22:38 -08005124 /* TCP/UDP checksum error bit is set */
5125 if (status_err &
5126 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005127 /*
5128 * work around errata with sctp packets where the TCPE aka
5129 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5130 * packets, (aka let the stack check the crc32c)
5131 */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005132 if ((skb->len == 60) &&
5133 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005134 ring->rx_stats.csum_err++;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005135
Auke Kok9d5c8242008-01-24 02:22:38 -08005136 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005137 return;
5138 }
5139 /* It must be a TCP or UDP packet with a valid checksum */
5140 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5141 skb->ip_summed = CHECKSUM_UNNECESSARY;
5142
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005143 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005144}

static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
				   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
			       union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger. In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
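
/*
 * Illustrative sketch, kept out of the build: worked example of the
 * header-split decode above. The mask/shift values come from the 82575
 * register headers; assuming the usual mask of 0x7fe0 and shift of 5, a
 * hdr_info write-back of 0x02a0 decodes to a 21-byte header. The helper
 * name and sample value are made up for illustration.
 */
#if 0
static u16 igb_hlen_demo(void)
{
	u16 hdr_info = 0x02a0;	/* as if read from rx_desc->wb.lower.lo_dword */
	u16 hlen = (hdr_info & E1000_RXDADV_HDRBUFLEN_MASK) >>
		   E1000_RXDADV_HDRBUFLEN_SHIFT;

	/* (0x02a0 & 0x7fe0) >> 5 == 21, then clamped to rx_buffer_len */
	return hlen;
}
#endif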

static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
				 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}
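
/*
 * Illustrative sketch, kept out of the build: how a NAPI poll routine
 * typically drives the cleanup function above. The driver's real poll
 * handler is igb_poll() elsewhere in this file; this simplified,
 * hypothetical version only shows the budget accounting and the
 * napi_complete() contract (stop polling once less than a full budget
 * of work was done, then re-arm interrupts).
 */
#if 0
static int igb_poll_sketch(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector =
		container_of(napi, struct igb_q_vector, napi);
	int work_done = 0;

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	/* less than a full budget consumed means RX is idle */
	if (work_done < budget) {
		napi_complete(napi);
		/* the real driver re-enables the queue interrupt here */
	}
	return work_done;
}
#endif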

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: address of the ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = pci_map_single(rx_ring->pdev,
							  skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->dma)) {
				buffer_info->dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
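
/*
 * Illustrative sketch, kept out of the build: the cleaned_count handed to
 * the allocator above is derived from the gap between next_to_clean and
 * next_to_use. A plausible formulation of igb_desc_unused() (the real one
 * lives in igb.h) is shown here; note that one slot is always left empty
 * so that next_to_use == next_to_clean unambiguously means "ring empty".
 */
#if 0
static u16 igb_desc_unused_sketch(const struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	/* producer has wrapped past the end of the ring */
	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
#endif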

/**
 * igb_mii_ioctl - handle MII PHY register ioctls
 * @netdev: network interface device structure
 * @ifr: interface request carrying the MII data
 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
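
/*
 * Illustrative sketch, kept out of the build: what the MII ioctl above
 * looks like from userspace. This is ordinary application code shown only
 * to document the calling convention; the helper name is made up and
 * error handling is minimal.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_phy_reg(int fd, const char *dev, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;
	mii->reg_num = reg;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)	/* fills mii->val_out */
		return -1;
	return mii->val_out;
}
#endif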

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request carrying the hwtstamp_config
 * @cmd: ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588)); /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
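
/*
 * Illustrative sketch, kept out of the build: enabling hardware time
 * stamping from userspace via the ioctl above. Note that the driver may
 * rewrite rx_filter to something broader (e.g. HWTSTAMP_FILTER_ALL), so
 * the config is copied back and should be re-checked by the caller. The
 * helper name is made up for illustration.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamping(int fd, const char *dev)
{
	struct ifreq ifr;
	struct hwtstamp_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;
	return cfg.rx_filter;	/* what the hardware actually applied */
}
#endif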

/**
 * igb_ioctl - entry point for device ioctls
 * @netdev: network interface device structure
 * @ifr: interface request
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
5634
Alexander Duyck009bc062009-07-23 18:08:35 +00005635s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5636{
5637 struct igb_adapter *adapter = hw->back;
5638 u16 cap_offset;
5639
5640 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5641 if (!cap_offset)
5642 return -E1000_ERR_CONFIG;
5643
5644 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
5645
5646 return 0;
5647}
5648
5649s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5650{
5651 struct igb_adapter *adapter = hw->back;
5652 u16 cap_offset;
5653
5654 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5655 if (!cap_offset)
5656 return -E1000_ERR_CONFIG;
5657
5658 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
5659
5660 return 0;
5661}
5662
Auke Kok9d5c8242008-01-24 02:22:38 -08005663static void igb_vlan_rx_register(struct net_device *netdev,
5664 struct vlan_group *grp)
5665{
5666 struct igb_adapter *adapter = netdev_priv(netdev);
5667 struct e1000_hw *hw = &adapter->hw;
5668 u32 ctrl, rctl;
5669
5670 igb_irq_disable(adapter);
5671 adapter->vlgrp = grp;
5672
5673 if (grp) {
5674 /* enable VLAN tag insert/strip */
5675 ctrl = rd32(E1000_CTRL);
5676 ctrl |= E1000_CTRL_VME;
5677 wr32(E1000_CTRL, ctrl);
5678
Alexander Duyck51466232009-10-27 23:47:35 +00005679 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08005680 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08005681 rctl &= ~E1000_RCTL_CFIEN;
5682 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08005683 } else {
5684 /* disable VLAN tag insert/strip */
5685 ctrl = rd32(E1000_CTRL);
5686 ctrl &= ~E1000_CTRL_VME;
5687 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08005688 }
5689
Alexander Duycke1739522009-02-19 20:39:44 -08005690 igb_rlpml_set(adapter);
5691
Auke Kok9d5c8242008-01-24 02:22:38 -08005692 if (!test_bit(__IGB_DOWN, &adapter->state))
5693 igb_irq_enable(adapter);
5694}
5695
5696static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5697{
5698 struct igb_adapter *adapter = netdev_priv(netdev);
5699 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005700 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005701
Alexander Duyck51466232009-10-27 23:47:35 +00005702 /* attempt to add filter to vlvf array */
5703 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005704
Alexander Duyck51466232009-10-27 23:47:35 +00005705 /* add the filter since PF can receive vlans w/o entry in vlvf */
5706 igb_vfta_set(hw, vid, true);
Auke Kok9d5c8242008-01-24 02:22:38 -08005707}
5708
5709static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5710{
5711 struct igb_adapter *adapter = netdev_priv(netdev);
5712 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005713 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00005714 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08005715
5716 igb_irq_disable(adapter);
5717 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5718
5719 if (!test_bit(__IGB_DOWN, &adapter->state))
5720 igb_irq_enable(adapter);
5721
Alexander Duyck51466232009-10-27 23:47:35 +00005722 /* remove vlan from VLVF table array */
5723 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08005724
Alexander Duyck51466232009-10-27 23:47:35 +00005725 /* if vid was not present in VLVF just remove it from table */
5726 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005727 igb_vfta_set(hw, vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08005728}
5729
5730static void igb_restore_vlan(struct igb_adapter *adapter)
5731{
5732 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5733
5734 if (adapter->vlgrp) {
5735 u16 vid;
5736 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5737 if (!vlan_group_get_device(adapter->vlgrp, vid))
5738 continue;
5739 igb_vlan_rx_add_vid(adapter->netdev, vid);
5740 }
5741 }
5742}

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
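
/*
 * Illustrative sketch, kept out of the build: igb_set_spd_dplx() takes
 * the sum of an ethtool speed and duplex constant, so forcing 100 Mb/s
 * full duplex is simply SPEED_100 + DUPLEX_FULL, followed by a link
 * reset. This mirrors how the ethtool set_settings path in this driver
 * uses it; the wrapper below is hypothetical.
 */
#if 0
static int igb_force_100_full(struct igb_adapter *adapter)
{
	int err = igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);

	if (err)
		return err;	/* unsupported speed/duplex combination */

	/* the forced mode only takes effect after the link is reset */
	if (netif_running(adapter->netdev))
		igb_reinit_locked(adapter);
	return 0;
}
#endif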

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
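
/*
 * Worked example of the RAR byte packing above, for illustration only.
 * For the (made-up) address 00:1b:21:aa:bb:cc the first four bytes land
 * in RAL and the last two in RAH, low byte first:
 *
 *   rar_low  = 0x00 | 0x1b << 8 | 0x21 << 16 | 0xaa << 24 = 0xaa211b00
 *   rar_high = 0xbb | 0xcc << 8                           = 0x0000ccbb
 *
 * E1000_RAH_AV is then OR-ed into rar_high to mark the entry valid, and
 * the pool bits select which queue/VF the address is steered to.
 */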

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address array
	 * and move towards the first entry, so a collision should not be
	 * possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	return -EOPNOTSUPP;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */