/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                                              pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
        u64 stamp = 0;
        int shift = 0;

        /*
         * The timestamp latches on the lowest register read.  For the 82580
         * the lowest register is SYSTIMR instead of SYSTIML.  However, we
         * never adjusted TIMINCA, so SYSTIMR will just read as all 0s;
         * ignore it.
         */
        if (hw->mac.type == e1000_82580) {
                stamp = rd32(E1000_SYSTIMR) >> 8;
                shift = IGB_82580_TSYNC_SHIFT;
        }

        stamp |= (u64)rd32(E1000_SYSTIML) << shift;
        stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
        return stamp;
}

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
        cycle_t hw = adapter->cycles.read(&adapter->cycles);
        struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
        struct timespec sys;
        struct timespec delta;
        getnstimeofday(&sys);

        delta = timespec_sub(nic, sys);

        sprintf(buffer,
                "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
                hw,
                (long)nic.tv_sec, nic.tv_nsec,
                (long)sys.tv_sec, sys.tv_nsec,
                (long)delta.tv_sec, delta.tv_nsec);

        return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

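/*
 * Q_IDX_82576(i) interleaves indices across the two halves of the 82576
 * queue register space: even i maps to i/2, odd i to i/2 + 8
 * (0->0, 1->8, 2->1, 3->9, ...), which matches the VF queue pairing
 * described in igb_cache_ring_register() below.
 */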
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i = 0, j = 0;
        u32 rbase_offset = adapter->vfs_allocated_count;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence
                 */
                if (adapter->vfs_allocated_count) {
                        for (; i < adapter->rss_queues; i++)
                                adapter->rx_ring[i]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(i);
                        for (; j < adapter->rss_queues; j++)
                                adapter->tx_ring[j]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(j);
                }
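                /* fall through - remaining queues (if any) are mapped
                 * linearly below */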
        case e1000_82575:
        case e1000_82580:
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
                for (; j < adapter->num_tx_queues; j++)
                        adapter->tx_ring[j]->reg_idx = rbase_offset + j;
                break;
        }
}

static void igb_free_queues(struct igb_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                kfree(adapter->tx_ring[i]);
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                kfree(adapter->rx_ring[i]);
                adapter->rx_ring[i] = NULL;
        }
        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        struct igb_ring *ring;
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
                adapter->tx_ring[i] = ring;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
                adapter->rx_ring[i] = ring;
        }

        igb_cache_ring_register(adapter);

        return 0;

err:
        igb_free_queues(adapter);

        return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
        u32 msixbm = 0;
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
        int rx_queue = IGB_N0_QUEUE;
        int tx_queue = IGB_N0_QUEUE;

        if (q_vector->rx_ring)
                rx_queue = q_vector->rx_ring->reg_idx;
        if (q_vector->tx_ring)
                tx_queue = q_vector->tx_ring->reg_idx;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                if (!adapter->msix_entries && msix_vector == 0)
                        msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
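                /* Each IVAR0 entry covers two rx/tx queue pairs:
                 *   byte 0: RX queue n       byte 1: TX queue n
                 *   byte 2: RX queue n + 8   byte 3: TX queue n + 8
                 * where n = queue & 0x7, as worked out below. */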
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        } else {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        } else {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        case e1000_82580:
                /* 82580 uses the same table-based approach as the 82576, but
                   has fewer entries; as a result we carry over for queues
                   greater than 4. */
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue & 0x1) {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        } else {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue & 0x1) {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        } else {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }

        /* add q_vector eims value to global eims_enable_mask */
        adapter->eims_enable_mask |= q_vector->eims_value;

        /* configure q_vector to set itr on first interrupt */
        q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support */
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);

                /* enable msix_other interrupt */
                array_wr32(E1000_MSIXBM(0), vector++,
                           E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
        case e1000_82580:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                     E1000_GPIE_PBA | E1000_GPIE_EIAME |
                     E1000_GPIE_NSICR);

                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;

                wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */

        adapter->eims_enable_mask |= adapter->eims_other;

        for (i = 0; i < adapter->num_q_vectors; i++)
                igb_assign_vector(adapter->q_vector[i], vector++);

        wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;

        err = request_irq(adapter->msix_entries[vector].vector,
                          igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto out;
        vector++;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];

                q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

                if (q_vector->rx_ring && q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else if (q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                                q_vector->tx_ring->queue_index);
                else if (q_vector->rx_ring)
                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else
                        sprintf(q_vector->name, "%s-unused", netdev->name);

                err = request_irq(adapter->msix_entries[vector].vector,
                                  igb_msix_ring, 0, q_vector->name,
                                  q_vector);
                if (err)
                        goto out;
                vector++;
        }

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
        }
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
        adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
        igb_free_queues(adapter);
        igb_free_q_vectors(adapter);
        igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        /* Number of supported queues. */
        adapter->num_rx_queues = adapter->rss_queues;
        adapter->num_tx_queues = adapter->rss_queues;

        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;

        /* if tx handler is separate add 1 for every tx queue */
        if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
                numvecs += adapter->num_tx_queues;

        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;

        /* add 1 vector for link status interrupts */
        numvecs++;
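        /* e.g. 4 rx queues with queue pairs disabled gives 4 rx + 4 tx + 1
         * link vector = 9 entries; with pairing enabled, 4 + 1 = 5 */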
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
                struct e1000_hw *hw = &adapter->hw;
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(adapter->pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
#endif
        adapter->vfs_allocated_count = 0;
        adapter->rss_queues = 1;
        adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;

err_out:
        while (v_idx) {
                v_idx--;
                q_vector = adapter->q_vector[v_idx];
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[v_idx] = NULL;
        }
        return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

        q_vector->rx_ring = adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->rx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

        q_vector->tx_ring = adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->tx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
        int i;
        int v_idx = 0;

        if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
            (adapter->num_q_vectors < adapter->num_tx_queues))
                return -ENOMEM;

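        /* with a vector per ring, assign them one-to-one; otherwise pair
         * each tx ring with the rx ring of the same index on a shared
         * q_vector */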
        if (adapter->num_q_vectors >=
            (adapter->num_rx_queues + adapter->num_tx_queues)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        } else {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        if (i < adapter->num_tx_queues)
                                igb_map_tx_ring_to_vector(adapter, i, v_idx);
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                }
                for (; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        }
        return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;

        igb_set_interrupt_capability(adapter);

        err = igb_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
                goto err_alloc_q_vectors;
        }

        err = igb_alloc_queues(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        err = igb_map_ring_to_vector(adapter);
        if (err) {
                dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
                goto err_map_queues;
        }


        return 0;
err_map_queues:
        igb_free_queues(adapter);
err_alloc_queues:
        igb_free_q_vectors(adapter);
err_alloc_q_vectors:
        igb_reset_interrupt_capability(adapter);
        return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                adapter->num_q_vectors = 1;
                err = igb_alloc_q_vectors(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for vectors\n");
                        goto request_done;
                }
                err = igb_alloc_queues(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for queues\n");
                        igb_free_q_vectors(adapter);
                        goto request_done;
                }
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
                igb_assign_vector(adapter->q_vector[0], 0);
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
                                  netdev->name, adapter);
                if (!err)
                        goto request_done;

                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
                          netdev->name, adapter);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                int vector = 0, i;

                free_irq(adapter->msix_entries[vector++].vector, adapter);

                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct igb_q_vector *q_vector = adapter->q_vector[i];
                        free_irq(adapter->msix_entries[vector++].vector,
                                 q_vector);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /*
         * we need to be careful when disabling interrupts.  The VFs are also
         * mapped into these registers and so clearing the bits can cause
         * issues on the VF drivers so we only need to clear what we set
         */
        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
                u32 regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                if (adapter->vfs_allocated_count) {
                        wr32(E1000_MBVFIMR, 0xFF);
                        ims |= E1000_IMS_VMMB;
                }
                if (adapter->hw.mac.type == e1000_82580)
                        ims |= E1000_IMS_DRSTA;

                wr32(E1000_IMS, ims);
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
                wr32(E1000_IAM, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
        }
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                /* add VID to filter table */
                igb_vfta_set(hw, vid, true);
                adapter->mng_vlan_id = vid;
        } else {
                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        }

        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
            (vid != old_vid) &&
            !vlan_group_get_device(adapter->vlgrp, old_vid)) {
                /* remove VID from filter table */
                igb_vfta_set(hw, old_vid, false);
        }
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_rx_mode(netdev);

        igb_restore_vlan(adapter);

        igb_setup_tctl(adapter);
        igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);

        igb_configure_tx(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }


        adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
        if (adapter->hw.phy.media_type == e1000_media_type_copper)
                igb_power_up_phy_copper(&adapter->hw);
        else
                igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
        if (adapter->hw.phy.media_type == e1000_media_type_copper)
                igb_power_down_phy_copper_82575(&adapter->hw);
        else
                igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
        else
                igb_assign_vector(adapter->q_vector[0], 0);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(adapter->netdev);

        /* start the watchdog. */
        hw->mac.get_link_status = 1;
        schedule_work(&adapter->watchdog_task);

        return 0;
}

void igb_down(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_disable(&q_vector->napi);
        }

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);

        /* record the stats before reset */
        igb_update_stats(adapter);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

        /* since we reset the hardware DCA settings were cleared */
        igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;

        /* Repartition PBA for greater than 9k MTU.
         * To take effect, CTRL.RST is required.
         */
        switch (mac->type) {
        case e1000_82580:
                pba = rd32(E1000_RXPBS);
                pba = igb_rxpbs_adjust_82580(pba);
                break;
        case e1000_82576:
                pba = rd32(E1000_RXPBS);
                pba &= E1000_RXPBS_SIZE_MASK_82576;
                break;
        case e1000_82575:
        default:
                pba = E1000_PBA_34K;
                break;
        }

        if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
            (mac->type < e1000_82576)) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* the Tx FIFO also stores 16 bytes of information about each
                 * packet, but doesn't include the Ethernet FCS because
                 * hardware appends it */
1294 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001295 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001296 ETH_FCS_LEN) * 2;
1297 min_tx_space = ALIGN(min_tx_space, 1024);
1298 min_tx_space >>= 10;
1299 /* software strips receive CRC, so leave room for it */
1300 min_rx_space = adapter->max_frame_size;
1301 min_rx_space = ALIGN(min_rx_space, 1024);
1302 min_rx_space >>= 10;
1303
1304 /* If current Tx allocation is less than the min Tx FIFO size,
1305 * and the min Tx FIFO size is less than the current Rx FIFO
1306 * allocation, take space away from current Rx allocation */
1307 if (tx_space < min_tx_space &&
1308 ((min_tx_space - tx_space) < pba)) {
1309 pba = pba - (min_tx_space - tx_space);
1310
1311 /* if short on rx space, rx wins and must trump tx
1312 * adjustment */
1313 if (pba < min_rx_space)
1314 pba = min_rx_space;
1315 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001316 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001317 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001318
1319 /* flow control settings */
1320 /* The high water mark must be low enough to fit one full frame
1321 * (or the size used for early receive) above it in the Rx FIFO.
1322 * Set it to the lower of:
1323 * - 90% of the Rx FIFO size, or
1324 * - the full Rx FIFO size minus one full frame */
1325 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001326 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001327
Alexander Duyckd405ea32009-12-23 13:21:27 +00001328 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1329 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001330 fc->pause_time = 0xFFFF;
1331 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001332 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001333
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001334 /* disable receive for all VFs and wait one second */
1335 if (adapter->vfs_allocated_count) {
1336 int i;
1337 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001338 adapter->vf_data[i].flags = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001339
1340 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001341 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001342
1343 /* disable transmits and receives */
1344 wr32(E1000_VFRE, 0);
1345 wr32(E1000_VFTE, 0);
1346 }
1347
Auke Kok9d5c8242008-01-24 02:22:38 -08001348 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001349 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001350 wr32(E1000_WUC, 0);
1351
Alexander Duyck330a6d62009-10-27 23:51:35 +00001352 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001353 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001354
Alexander Duyck55cac242009-11-19 12:42:21 +00001355 if (hw->mac.type == e1000_82580) {
1356 u32 reg = rd32(E1000_PCIEMISC);
1357 wr32(E1000_PCIEMISC,
1358 reg & ~E1000_PCIEMISC_LX_DECISION);
1359 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001360 if (!netif_running(adapter->netdev))
1361 igb_power_down_link(adapter);
1362
Auke Kok9d5c8242008-01-24 02:22:38 -08001363 igb_update_mng_vlan(adapter);
1364
1365 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1366 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1367
Alexander Duyck330a6d62009-10-27 23:51:35 +00001368 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001369}
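
/*
 * Worked example (not driver code, compiled out) of the FIFO partitioning
 * and flow-control math in igb_reset() above, assuming a 1500-byte MTU
 * (max_frame_size = 1500 + 14 + 4 = 1518 bytes) and a 34 KB packet buffer:
 */
#if 0
static void example_fifo_math(void)
{
	u32 max_frame = 1518;
	u32 pba = 34;		/* packet buffer size in KB */
	u32 min_tx_space, min_rx_space, hwm;

	/* two frames, each with a 16-byte advanced descriptor, minus the
	 * FCS the hardware appends: (1518 + 16 - 4) * 2 = 3060 bytes */
	min_tx_space = (max_frame + sizeof(union e1000_adv_tx_desc) -
			ETH_FCS_LEN) * 2;
	min_tx_space = ALIGN(min_tx_space, 1024) >> 10;	/* 3072 -> 3 KB */

	/* one full frame, rounded up: 1518 -> 2048 -> 2 KB */
	min_rx_space = ALIGN(max_frame, 1024) >> 10;

	/* high water mark: min(90% of 34816, 34816 - 2 * 1518) =
	 * min(31334, 31780) = 31334 bytes, masked to 16-byte granularity */
	hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - 2 * max_frame));
	hwm &= 0xFFF0;		/* -> 31328 */
}
#endif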
1370
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001371static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001372 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001373 .ndo_stop = igb_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08001374 .ndo_start_xmit = igb_xmit_frame_adv,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001375 .ndo_get_stats = igb_get_stats,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001376 .ndo_set_rx_mode = igb_set_rx_mode,
1377 .ndo_set_multicast_list = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001378 .ndo_set_mac_address = igb_set_mac,
1379 .ndo_change_mtu = igb_change_mtu,
1380 .ndo_do_ioctl = igb_ioctl,
1381 .ndo_tx_timeout = igb_tx_timeout,
1382 .ndo_validate_addr = eth_validate_addr,
1383 .ndo_vlan_rx_register = igb_vlan_rx_register,
1384 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1385 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001386 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1387 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1388 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1389 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001390#ifdef CONFIG_NET_POLL_CONTROLLER
1391 .ndo_poll_controller = igb_netpoll,
1392#endif
1393};
1394
Taku Izumi42bfd332008-06-20 12:10:30 +09001395/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001396 * igb_probe - Device Initialization Routine
1397 * @pdev: PCI device information struct
1398 * @ent: entry in igb_pci_tbl
1399 *
1400 * Returns 0 on success, negative on failure
1401 *
1402 * igb_probe initializes an adapter identified by a pci_dev structure.
1403 * The OS initialization, configuring of the adapter private structure,
1404 * and a hardware reset occur.
1405 **/
1406static int __devinit igb_probe(struct pci_dev *pdev,
1407 const struct pci_device_id *ent)
1408{
1409 struct net_device *netdev;
1410 struct igb_adapter *adapter;
1411 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001412 u16 eeprom_data = 0;
1413 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001414 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1415 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001416 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001417 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1418 u32 part_num;
1419
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001420 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001421 if (err)
1422 return err;
1423
1424 pci_using_dac = 0;
Yang Hongyang6a355282009-04-06 19:01:13 -07001425 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001426 if (!err) {
Yang Hongyang6a355282009-04-06 19:01:13 -07001427 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001428 if (!err)
1429 pci_using_dac = 1;
1430 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07001431 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001432 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07001433 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001434 if (err) {
1435 dev_err(&pdev->dev, "No usable DMA "
1436 "configuration, aborting\n");
1437 goto err_dma;
1438 }
1439 }
1440 }
1441
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001442 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1443 IORESOURCE_MEM),
1444 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001445 if (err)
1446 goto err_pci_reg;
1447
Frans Pop19d5afd2009-10-02 10:04:12 -07001448 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001449
Auke Kok9d5c8242008-01-24 02:22:38 -08001450 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001451 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001452
1453 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001454 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1455 IGB_ABS_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001456 if (!netdev)
1457 goto err_alloc_etherdev;
1458
1459 SET_NETDEV_DEV(netdev, &pdev->dev);
1460
1461 pci_set_drvdata(pdev, netdev);
1462 adapter = netdev_priv(netdev);
1463 adapter->netdev = netdev;
1464 adapter->pdev = pdev;
1465 hw = &adapter->hw;
1466 hw->back = adapter;
1467 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1468
1469 mmio_start = pci_resource_start(pdev, 0);
1470 mmio_len = pci_resource_len(pdev, 0);
1471
1472 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001473 hw->hw_addr = ioremap(mmio_start, mmio_len);
1474 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001475 goto err_ioremap;
1476
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001477 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001478 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001479 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001480
1481 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1482
1483 netdev->mem_start = mmio_start;
1484 netdev->mem_end = mmio_start + mmio_len;
1485
Auke Kok9d5c8242008-01-24 02:22:38 -08001486 /* PCI config space info */
1487 hw->vendor_id = pdev->vendor;
1488 hw->device_id = pdev->device;
1489 hw->revision_id = pdev->revision;
1490 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1491 hw->subsystem_device_id = pdev->subsystem_device;
1492
Auke Kok9d5c8242008-01-24 02:22:38 -08001493 /* Copy the default MAC, PHY and NVM function pointers */
1494 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1495 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1496 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1497 /* Initialize skew-specific constants */
1498 err = ei->get_invariants(hw);
1499 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001500 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001501
Alexander Duyck450c87c2009-02-06 23:22:11 +00001502 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001503 err = igb_sw_init(adapter);
1504 if (err)
1505 goto err_sw_init;
1506
1507 igb_get_bus_info_pcie(hw);
1508
1509 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001510
1511 /* Copper options */
1512 if (hw->phy.media_type == e1000_media_type_copper) {
1513 hw->phy.mdix = AUTO_ALL_MODES;
1514 hw->phy.disable_polarity_correction = false;
1515 hw->phy.ms_type = e1000_ms_hw_default;
1516 }
1517
1518 if (igb_check_reset_block(hw))
1519 dev_info(&pdev->dev,
1520 "PHY reset is blocked due to SOL/IDER session.\n");
1521
1522 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001523 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001524 NETIF_F_HW_VLAN_TX |
1525 NETIF_F_HW_VLAN_RX |
1526 NETIF_F_HW_VLAN_FILTER;
1527
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001528 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001529 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001530 netdev->features |= NETIF_F_TSO6;
Herbert Xu5c0999b2009-01-19 15:20:57 -08001531 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001532
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001533 netdev->vlan_features |= NETIF_F_TSO;
1534 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001535 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001536 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001537 netdev->vlan_features |= NETIF_F_SG;
1538
Auke Kok9d5c8242008-01-24 02:22:38 -08001539 if (pci_using_dac)
1540 netdev->features |= NETIF_F_HIGHDMA;
1541
Alexander Duyck5b043fb2009-10-27 23:52:31 +00001542 if (hw->mac.type >= e1000_82576)
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001543 netdev->features |= NETIF_F_SCTP_CSUM;
1544
Alexander Duyck330a6d62009-10-27 23:51:35 +00001545 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001546
1547 /* before reading the NVM, reset the controller to put the device in a
1548 * known good starting state */
1549 hw->mac.ops.reset_hw(hw);
1550
1551 /* make sure the NVM is good */
1552 if (igb_validate_nvm_checksum(hw) < 0) {
1553 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1554 err = -EIO;
1555 goto err_eeprom;
1556 }
1557
1558 /* copy the MAC address out of the NVM */
1559 if (hw->mac.ops.read_mac_addr(hw))
1560 dev_err(&pdev->dev, "NVM Read Error\n");
1561
1562 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1563 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1564
1565 if (!is_valid_ether_addr(netdev->perm_addr)) {
1566 dev_err(&pdev->dev, "Invalid MAC Address\n");
1567 err = -EIO;
1568 goto err_eeprom;
1569 }
1570
Alexander Duyck0e340482009-03-20 00:17:08 +00001571 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1572 (unsigned long) adapter);
1573 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1574 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001575
1576 INIT_WORK(&adapter->reset_task, igb_reset_task);
1577 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1578
Alexander Duyck450c87c2009-02-06 23:22:11 +00001579 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001580 adapter->fc_autoneg = true;
1581 hw->mac.autoneg = true;
1582 hw->phy.autoneg_advertised = 0x2f;
1583
Alexander Duyck0cce1192009-07-23 18:10:24 +00001584 hw->fc.requested_mode = e1000_fc_default;
1585 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001586
Auke Kok9d5c8242008-01-24 02:22:38 -08001587 igb_validate_mdi_setting(hw);
1588
Auke Kok9d5c8242008-01-24 02:22:38 -08001589 /* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
1590 * enable the ACPI Magic Packet filter.
1591 */
1592
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001593 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001594 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duyck55cac242009-11-19 12:42:21 +00001595 else if (hw->mac.type == e1000_82580)
1596 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1597 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1598 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001599 else if (hw->bus.func == 1)
1600 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001601
1602 if (eeprom_data & eeprom_apme_mask)
1603 adapter->eeprom_wol |= E1000_WUFC_MAG;
1604
1605 /* now that we have the eeprom settings, apply the special cases where
1606 * the eeprom may be wrong or the board simply won't support wake on
1607 * lan on a particular port */
1608 switch (pdev->device) {
1609 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1610 adapter->eeprom_wol = 0;
1611 break;
1612 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001613 case E1000_DEV_ID_82576_FIBER:
1614 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001615 /* Wake events only supported on port A for dual fiber
1616 * regardless of eeprom setting */
1617 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1618 adapter->eeprom_wol = 0;
1619 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001620 case E1000_DEV_ID_82576_QUAD_COPPER:
1621 /* if quad port adapter, disable WoL on all but port A */
1622 if (global_quad_port_a != 0)
1623 adapter->eeprom_wol = 0;
1624 else
1625 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1626 /* Reset for multiple quad port adapters */
1627 if (++global_quad_port_a == 4)
1628 global_quad_port_a = 0;
1629 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001630 }
1631
1632 /* initialize the wol settings based on the eeprom settings */
1633 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001634 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001635
1636 /* reset the hardware with the new settings */
1637 igb_reset(adapter);
1638
1639 /* let the f/w know that the h/w is now under the control of the
1640 * driver. */
1641 igb_get_hw_control(adapter);
1642
Auke Kok9d5c8242008-01-24 02:22:38 -08001643 strcpy(netdev->name, "eth%d");
1644 err = register_netdev(netdev);
1645 if (err)
1646 goto err_register;
1647
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001648 /* carrier off reporting is important to ethtool even BEFORE open */
1649 netif_carrier_off(netdev);
1650
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001651#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001652 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001653 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001654 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001655 igb_setup_dca(adapter);
1656 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001657
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001658#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001659 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1660 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001661 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001662 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00001663 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1664 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001665 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1666 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1667 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1668 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001669 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001670
1671 igb_read_part_num(hw, &part_num);
1672 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1673 (part_num >> 8), (part_num & 0xff));
1674
1675 dev_info(&pdev->dev,
1676 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1677 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001678 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001679 adapter->num_rx_queues, adapter->num_tx_queues);
1680
Auke Kok9d5c8242008-01-24 02:22:38 -08001681 return 0;
1682
1683err_register:
1684 igb_release_hw_control(adapter);
1685err_eeprom:
1686 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001687 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001688
1689 if (hw->flash_address)
1690 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001691err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001692 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001693 iounmap(hw->hw_addr);
1694err_ioremap:
1695 free_netdev(netdev);
1696err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00001697 pci_release_selected_regions(pdev,
1698 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001699err_pci_reg:
1700err_dma:
1701 pci_disable_device(pdev);
1702 return err;
1703}
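
/*
 * Illustrative sketch (not driver code, compiled out): the probe routine
 * above uses the standard "try 64-bit DMA, fall back to 32-bit" pattern. A
 * condensed version with error handling trimmed; the helper name is
 * hypothetical and it uses the same pci_set_dma_mask()/DMA_BIT_MASK()
 * calls as igb_probe():
 */
#if 0
static int example_dma_setup(struct pci_dev *pdev, int *using_dac)
{
	*using_dac = 0;
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = 1;	/* device may DMA above 4 GB */
		return 0;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* restricted to the 32-bit address space */
	return -EIO;		/* no usable DMA configuration */
}
#endif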
1704
1705/**
1706 * igb_remove - Device Removal Routine
1707 * @pdev: PCI device information struct
1708 *
1709 * igb_remove is called by the PCI subsystem to alert the driver
1710 * that it should release a PCI device. This could be caused by a
1711 * Hot-Plug event, or because the driver is going to be removed from
1712 * memory.
1713 **/
1714static void __devexit igb_remove(struct pci_dev *pdev)
1715{
1716 struct net_device *netdev = pci_get_drvdata(pdev);
1717 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001718 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001719
1720 /* flush_scheduled_work() may reschedule our watchdog task, so
1721 * explicitly disable the watchdog task from being rescheduled */
1722 set_bit(__IGB_DOWN, &adapter->state);
1723 del_timer_sync(&adapter->watchdog_timer);
1724 del_timer_sync(&adapter->phy_info_timer);
1725
1726 flush_scheduled_work();
1727
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001728#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001729 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001730 dev_info(&pdev->dev, "DCA disabled\n");
1731 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001732 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001733 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001734 }
1735#endif
1736
Auke Kok9d5c8242008-01-24 02:22:38 -08001737 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1738 * would have already happened in close and is redundant. */
1739 igb_release_hw_control(adapter);
1740
1741 unregister_netdev(netdev);
1742
Alexander Duyck047e0032009-10-27 15:49:27 +00001743 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001744
Alexander Duyck37680112009-02-19 20:40:30 -08001745#ifdef CONFIG_PCI_IOV
1746 /* reclaim resources allocated to VFs */
1747 if (adapter->vf_data) {
1748 /* disable iov and allow time for transactions to clear */
1749 pci_disable_sriov(pdev);
1750 msleep(500);
1751
1752 kfree(adapter->vf_data);
1753 adapter->vf_data = NULL;
1754 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1755 msleep(100);
1756 dev_info(&pdev->dev, "IOV Disabled\n");
1757 }
1758#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00001759
Alexander Duyck28b07592009-02-06 23:20:31 +00001760 iounmap(hw->hw_addr);
1761 if (hw->flash_address)
1762 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00001763 pci_release_selected_regions(pdev,
1764 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001765
1766 free_netdev(netdev);
1767
Frans Pop19d5afd2009-10-02 10:04:12 -07001768 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001769
Auke Kok9d5c8242008-01-24 02:22:38 -08001770 pci_disable_device(pdev);
1771}
1772
1773/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00001774 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1775 * @adapter: board private structure to initialize
1776 *
1777 * This function initializes the vf specific data storage and then attempts to
1778 * allocate the VFs. It is ordered this way because it is much more
1779 * expensive time-wise to disable SR-IOV than it is to allocate and free
1780 * the memory for the VFs.
1781 **/
1782static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1783{
1784#ifdef CONFIG_PCI_IOV
1785 struct pci_dev *pdev = adapter->pdev;
1786
1787 if (adapter->vfs_allocated_count > 7)
1788 adapter->vfs_allocated_count = 7;
1789
1790 if (adapter->vfs_allocated_count) {
1791 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1792 sizeof(struct vf_data_storage),
1793 GFP_KERNEL);
1794 /* if allocation failed then we do not support SR-IOV */
1795 if (!adapter->vf_data) {
1796 adapter->vfs_allocated_count = 0;
1797 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1798 "Data Storage\n");
1799 }
1800 }
1801
1802 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1803 kfree(adapter->vf_data);
1804 adapter->vf_data = NULL;
1805#endif /* CONFIG_PCI_IOV */
1806 adapter->vfs_allocated_count = 0;
1807#ifdef CONFIG_PCI_IOV
1808 } else {
1809 unsigned char mac_addr[ETH_ALEN];
1810 int i;
1811 dev_info(&pdev->dev, "%d vfs allocated\n",
1812 adapter->vfs_allocated_count);
1813 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1814 random_ether_addr(mac_addr);
1815 igb_set_vf_mac(adapter, i, mac_addr);
1816 }
1817 }
1818#endif /* CONFIG_PCI_IOV */
1819}
1820
Alexander Duyck115f4592009-11-12 18:37:00 +00001821
1822/**
1823 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1824 * @adapter: board private structure to initialize
1825 *
1826 * igb_init_hw_timer initializes the function pointer and values for the hw
1827 * timer found in hardware.
1828 **/
1829static void igb_init_hw_timer(struct igb_adapter *adapter)
1830{
1831 struct e1000_hw *hw = &adapter->hw;
1832
1833 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00001834 case e1000_82580:
1835 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1836 adapter->cycles.read = igb_read_clock;
1837 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1838 adapter->cycles.mult = 1;
1839 /*
1840 * The 82580 timesync hardware advances the system timer by 8ns every
1841 * 8ns, and the value cannot be shifted. Instead we need to shift
1842 * the registers to generate a 64bit timer value. As a result
1843 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
1844 * 24 in order to generate a larger value for synchronization.
1845 */
1846 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
1847 /* disable system timer temporarily by setting bit 31 */
1848 wr32(E1000_TSAUXC, 0x80000000);
1849 wrfl();
1850
1851 /* Set registers so that rollover occurs soon to test this. */
1852 wr32(E1000_SYSTIMR, 0x00000000);
1853 wr32(E1000_SYSTIML, 0x80000000);
1854 wr32(E1000_SYSTIMH, 0x000000FF);
1855 wrfl();
1856
1857 /* enable system timer by clearing bit 31 */
1858 wr32(E1000_TSAUXC, 0x0);
1859 wrfl();
1860
1861 timecounter_init(&adapter->clock,
1862 &adapter->cycles,
1863 ktime_to_ns(ktime_get_real()));
1864 /*
1865 * Synchronize our NIC clock against system wall clock. NIC
1866 * time stamp reading requires ~3us per sample; each sample
1867 * was pretty stable even under load, so only 10 samples are
1868 * required for each offset comparison.
1869 */
1870 memset(&adapter->compare, 0, sizeof(adapter->compare));
1871 adapter->compare.source = &adapter->clock;
1872 adapter->compare.target = ktime_get_real;
1873 adapter->compare.num_samples = 10;
1874 timecompare_update(&adapter->compare, 0);
1875 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00001876 case e1000_82576:
1877 /*
1878 * Initialize hardware timer: we keep it running just in case
1879 * that some program needs it later on.
1880 */
1881 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1882 adapter->cycles.read = igb_read_clock;
1883 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1884 adapter->cycles.mult = 1;
1885 /*
1886 * Scale the NIC clock cycle by a large factor so that
1887 * relatively small clock corrections can be added or
1888 * subtracted at each clock tick. The drawbacks of a large
1889 * factor are a) that the clock register overflows more quickly
1890 * (not such a big deal) and b) that the increment per tick has
1891 * to fit into 24 bits. As a result we need to use a shift of
1892 * 19 so we can fit a value of 16 into the TIMINCA register.
1893 */
1894 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1895 wr32(E1000_TIMINCA,
1896 (1 << E1000_TIMINCA_16NS_SHIFT) |
1897 (16 << IGB_82576_TSYNC_SHIFT));
1898
1899 /* Set registers so that rollover occurs soon to test this. */
1900 wr32(E1000_SYSTIML, 0x00000000);
1901 wr32(E1000_SYSTIMH, 0xFF800000);
1902 wrfl();
1903
1904 timecounter_init(&adapter->clock,
1905 &adapter->cycles,
1906 ktime_to_ns(ktime_get_real()));
1907 /*
1908 * Synchronize our NIC clock against system wall clock. NIC
1909 * time stamp reading requires ~3us per sample; each sample
1910 * was pretty stable even under load, so only 10 samples are
1911 * required for each offset comparison.
1912 */
1913 memset(&adapter->compare, 0, sizeof(adapter->compare));
1914 adapter->compare.source = &adapter->clock;
1915 adapter->compare.target = ktime_get_real;
1916 adapter->compare.num_samples = 10;
1917 timecompare_update(&adapter->compare, 0);
1918 break;
1919 case e1000_82575:
1920 /* 82575 does not support timesync */
1921 default:
1922 break;
1923 }
1924
1925}
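
/*
 * Worked example (not driver code): a struct cyclecounter converts raw
 * counter deltas to nanoseconds as ns = (delta * mult) >> shift. The 82576
 * setup above programs TIMINCA with an increment of 16 << 19; assuming the
 * 1 << E1000_TIMINCA_16NS_SHIFT field selects a 16 ns increment period, as
 * the macro name suggests, SYSTIM advances by 16 * 2^19 every 16 ns, i.e.
 * 2^19 per nanosecond. With mult = 1 and shift = 19, a raw delta of
 * 1 << 19 then converts back to exactly 1 ns.
 */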
1926
Alexander Duycka6b623e2009-10-27 23:47:53 +00001927/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001928 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1929 * @adapter: board private structure to initialize
1930 *
1931 * igb_sw_init initializes the Adapter private data structure.
1932 * Fields are initialized based on PCI device information and
1933 * OS network device settings (MTU size).
1934 **/
1935static int __devinit igb_sw_init(struct igb_adapter *adapter)
1936{
1937 struct e1000_hw *hw = &adapter->hw;
1938 struct net_device *netdev = adapter->netdev;
1939 struct pci_dev *pdev = adapter->pdev;
1940
1941 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1942
Alexander Duyck68fd9912008-11-20 00:48:10 -08001943 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1944 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001945 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1946 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1947
Auke Kok9d5c8242008-01-24 02:22:38 -08001948 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1949 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1950
Alexander Duycka6b623e2009-10-27 23:47:53 +00001951#ifdef CONFIG_PCI_IOV
1952 if (hw->mac.type == e1000_82576)
1953 adapter->vfs_allocated_count = max_vfs;
1954
1955#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00001956 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1957
1958 /*
1959 * if rss_queues > 4, or if VFs will be allocated while rss_queues > 1,
1960 * combine the queues into queue pairs in order to conserve the limited
1961 * supply of interrupt vectors (a sketch of this rule follows below)
1962 */
1963 if ((adapter->rss_queues > 4) ||
1964 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1965 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1966
Alexander Duycka6b623e2009-10-27 23:47:53 +00001967 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00001968 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001969 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1970 return -ENOMEM;
1971 }
1972
Alexander Duyck115f4592009-11-12 18:37:00 +00001973 igb_init_hw_timer(adapter);
Alexander Duycka6b623e2009-10-27 23:47:53 +00001974 igb_probe_vfs(adapter);
1975
Auke Kok9d5c8242008-01-24 02:22:38 -08001976 /* Explicitly disable IRQ since the NIC can be in any state. */
1977 igb_irq_disable(adapter);
1978
1979 set_bit(__IGB_DOWN, &adapter->state);
1980 return 0;
1981}
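
/*
 * Illustrative sketch (not driver code, compiled out) of the queue-pairing
 * rule applied in igb_sw_init() above; the helper name is hypothetical:
 */
#if 0
static bool example_needs_queue_pairs(u32 rss_queues, u32 vfs_allocated)
{
	/* pair Tx/Rx queues on a shared vector when RSS wants more than 4
	 * queues, or when VFs consume most of the vector supply */
	return (rss_queues > 4) ||
	       ((rss_queues > 1) && (vfs_allocated > 6));
}
#endif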
1982
1983/**
1984 * igb_open - Called when a network interface is made active
1985 * @netdev: network interface device structure
1986 *
1987 * Returns 0 on success, negative value on failure
1988 *
1989 * The open entry point is called when a network interface is made
1990 * active by the system (IFF_UP). At this point all resources needed
1991 * for transmit and receive operations are allocated, the interrupt
1992 * handler is registered with the OS, the watchdog timer is started,
1993 * and the stack is notified that the interface is ready.
1994 **/
1995static int igb_open(struct net_device *netdev)
1996{
1997 struct igb_adapter *adapter = netdev_priv(netdev);
1998 struct e1000_hw *hw = &adapter->hw;
1999 int err;
2000 int i;
2001
2002 /* disallow open during test */
2003 if (test_bit(__IGB_TESTING, &adapter->state))
2004 return -EBUSY;
2005
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002006 netif_carrier_off(netdev);
2007
Auke Kok9d5c8242008-01-24 02:22:38 -08002008 /* allocate transmit descriptors */
2009 err = igb_setup_all_tx_resources(adapter);
2010 if (err)
2011 goto err_setup_tx;
2012
2013 /* allocate receive descriptors */
2014 err = igb_setup_all_rx_resources(adapter);
2015 if (err)
2016 goto err_setup_rx;
2017
Nick Nunley88a268c2010-02-17 01:01:59 +00002018 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002019
Auke Kok9d5c8242008-01-24 02:22:38 -08002020 /* before we allocate an interrupt, we must be ready to handle it.
2021 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2022 * as soon as we call pci_request_irq, so we have to set up our
2023 * clean_rx handler before we do so. */
2024 igb_configure(adapter);
2025
2026 err = igb_request_irq(adapter);
2027 if (err)
2028 goto err_req_irq;
2029
2030 /* From here on the code is the same as igb_up() */
2031 clear_bit(__IGB_DOWN, &adapter->state);
2032
Alexander Duyck047e0032009-10-27 15:49:27 +00002033 for (i = 0; i < adapter->num_q_vectors; i++) {
2034 struct igb_q_vector *q_vector = adapter->q_vector[i];
2035 napi_enable(&q_vector->napi);
2036 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002037
2038 /* Clear any pending interrupts. */
2039 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002040
2041 igb_irq_enable(adapter);
2042
Alexander Duyckd4960302009-10-27 15:53:45 +00002043 /* notify VFs that reset has been completed */
2044 if (adapter->vfs_allocated_count) {
2045 u32 reg_data = rd32(E1000_CTRL_EXT);
2046 reg_data |= E1000_CTRL_EXT_PFRSTD;
2047 wr32(E1000_CTRL_EXT, reg_data);
2048 }
2049
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002050 netif_tx_start_all_queues(netdev);
2051
Alexander Duyck25568a52009-10-27 23:49:59 +00002052 /* start the watchdog. */
2053 hw->mac.get_link_status = 1;
2054 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002055
2056 return 0;
2057
2058err_req_irq:
2059 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002060 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002061 igb_free_all_rx_resources(adapter);
2062err_setup_rx:
2063 igb_free_all_tx_resources(adapter);
2064err_setup_tx:
2065 igb_reset(adapter);
2066
2067 return err;
2068}
2069
2070/**
2071 * igb_close - Disables a network interface
2072 * @netdev: network interface device structure
2073 *
2074 * Returns 0, this is not allowed to fail
2075 *
2076 * The close entry point is called when an interface is de-activated
2077 * by the OS. The hardware is still under the driver's control, but
2078 * needs to be disabled. A global MAC reset is issued to stop the
2079 * hardware, and all transmit and receive resources are freed.
2080 **/
2081static int igb_close(struct net_device *netdev)
2082{
2083 struct igb_adapter *adapter = netdev_priv(netdev);
2084
2085 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2086 igb_down(adapter);
2087
2088 igb_free_irq(adapter);
2089
2090 igb_free_all_tx_resources(adapter);
2091 igb_free_all_rx_resources(adapter);
2092
Auke Kok9d5c8242008-01-24 02:22:38 -08002093 return 0;
2094}
2095
2096/**
2097 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002098 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2099 *
2100 * Return 0 on success, negative on failure
2101 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002102int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002103{
Alexander Duyck80785292009-10-27 15:51:47 +00002104 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002105 int size;
2106
2107 size = sizeof(struct igb_buffer) * tx_ring->count;
2108 tx_ring->buffer_info = vmalloc(size);
2109 if (!tx_ring->buffer_info)
2110 goto err;
2111 memset(tx_ring->buffer_info, 0, size);
2112
2113 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002114 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002115 tx_ring->size = ALIGN(tx_ring->size, 4096);
2116
Alexander Duyck439705e2009-10-27 23:49:20 +00002117 tx_ring->desc = pci_alloc_consistent(pdev,
2118 tx_ring->size,
Auke Kok9d5c8242008-01-24 02:22:38 -08002119 &tx_ring->dma);
2120
2121 if (!tx_ring->desc)
2122 goto err;
2123
Auke Kok9d5c8242008-01-24 02:22:38 -08002124 tx_ring->next_to_use = 0;
2125 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002126 return 0;
2127
2128err:
2129 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00002130 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002131 "Unable to allocate memory for the transmit descriptor ring\n");
2132 return -ENOMEM;
2133}
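
/*
 * Worked example (not driver code): each advanced Tx descriptor is 16 bytes
 * and the ring is rounded up to a 4 KB multiple above, so for instance:
 *   count = 256: 256 * 16 = 4096 -> ALIGN(4096, 4096) = 4096 (one page)
 *   count = 320: 320 * 16 = 5120 -> ALIGN(5120, 4096) = 8192 (two pages)
 */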
2134
2135/**
2136 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2137 * (Descriptors) for all queues
2138 * @adapter: board private structure
2139 *
2140 * Return 0 on success, negative on failure
2141 **/
2142static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2143{
Alexander Duyck439705e2009-10-27 23:49:20 +00002144 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002145 int i, err = 0;
2146
2147 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002148 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002149 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002150 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002151 "Allocation for Tx Queue %u failed\n", i);
2152 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002153 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002154 break;
2155 }
2156 }
2157
Alexander Duycka99955f2009-11-12 18:37:19 +00002158 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002159 int r_idx = i % adapter->num_tx_queues;
Alexander Duyck3025a442010-02-17 01:02:39 +00002160 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002161 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002162 return err;
2163}
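
/*
 * Worked example (not driver code): the multi_tx_table fill above maps the
 * absolute queue index onto the allocated rings round-robin. With a
 * hypothetical num_tx_queues = 4 and 8 table entries, r_idx = i % 4 gives:
 *   i:     0 1 2 3 4 5 6 7
 *   r_idx: 0 1 2 3 0 1 2 3
 */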
2164
2165/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002166 * igb_setup_tctl - configure the transmit control registers
2167 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002168 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002169void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002170{
Auke Kok9d5c8242008-01-24 02:22:38 -08002171 struct e1000_hw *hw = &adapter->hw;
2172 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002173
Alexander Duyck85b430b2009-10-27 15:50:29 +00002174 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2175 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002176
2177 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002178 tctl = rd32(E1000_TCTL);
2179 tctl &= ~E1000_TCTL_CT;
2180 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2181 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2182
2183 igb_config_collision_dist(hw);
2184
Auke Kok9d5c8242008-01-24 02:22:38 -08002185 /* Enable transmits */
2186 tctl |= E1000_TCTL_EN;
2187
2188 wr32(E1000_TCTL, tctl);
2189}
2190
2191/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002192 * igb_configure_tx_ring - Configure transmit ring after Reset
2193 * @adapter: board private structure
2194 * @ring: tx ring to configure
2195 *
2196 * Configure a transmit ring after a reset.
2197 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002198void igb_configure_tx_ring(struct igb_adapter *adapter,
2199 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002200{
2201 struct e1000_hw *hw = &adapter->hw;
2202 u32 txdctl;
2203 u64 tdba = ring->dma;
2204 int reg_idx = ring->reg_idx;
2205
2206 /* disable the queue */
2207 txdctl = rd32(E1000_TXDCTL(reg_idx));
2208 wr32(E1000_TXDCTL(reg_idx),
2209 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2210 wrfl();
2211 mdelay(10);
2212
2213 wr32(E1000_TDLEN(reg_idx),
2214 ring->count * sizeof(union e1000_adv_tx_desc));
2215 wr32(E1000_TDBAL(reg_idx),
2216 tdba & 0x00000000ffffffffULL);
2217 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2218
Alexander Duyckfce99e32009-10-27 15:51:27 +00002219 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2220 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2221 writel(0, ring->head);
2222 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002223
2224 txdctl |= IGB_TX_PTHRESH;
2225 txdctl |= IGB_TX_HTHRESH << 8;
2226 txdctl |= IGB_TX_WTHRESH << 16;
2227
2228 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2229 wr32(E1000_TXDCTL(reg_idx), txdctl);
2230}
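
/*
 * Illustrative sketch (not driver code, compiled out): splitting a 64-bit
 * DMA base address across a 32-bit register pair, as done for TDBAL/TDBAH
 * above; the helper name is hypothetical:
 */
#if 0
static void example_split_dma_addr(u64 dma, u32 *lo, u32 *hi)
{
	*lo = (u32)(dma & 0x00000000ffffffffULL);	/* -> TDBAL */
	*hi = (u32)(dma >> 32);				/* -> TDBAH */
}
#endif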
2231
2232/**
2233 * igb_configure_tx - Configure transmit Unit after Reset
2234 * @adapter: board private structure
2235 *
2236 * Configure the Tx unit of the MAC after a reset.
2237 **/
2238static void igb_configure_tx(struct igb_adapter *adapter)
2239{
2240 int i;
2241
2242 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002243 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002244}
2245
2246/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002247 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002248 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2249 *
2250 * Returns 0 on success, negative on failure
2251 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002252int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002253{
Alexander Duyck80785292009-10-27 15:51:47 +00002254 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002255 int size, desc_len;
2256
2257 size = sizeof(struct igb_buffer) * rx_ring->count;
2258 rx_ring->buffer_info = vmalloc(size);
2259 if (!rx_ring->buffer_info)
2260 goto err;
2261 memset(rx_ring->buffer_info, 0, size);
2262
2263 desc_len = sizeof(union e1000_adv_rx_desc);
2264
2265 /* Round up to nearest 4K */
2266 rx_ring->size = rx_ring->count * desc_len;
2267 rx_ring->size = ALIGN(rx_ring->size, 4096);
2268
2269 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2270 &rx_ring->dma);
2271
2272 if (!rx_ring->desc)
2273 goto err;
2274
2275 rx_ring->next_to_clean = 0;
2276 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002277
Auke Kok9d5c8242008-01-24 02:22:38 -08002278 return 0;
2279
2280err:
2281 vfree(rx_ring->buffer_info);
Alexander Duyck439705e2009-10-27 23:49:20 +00002282 rx_ring->buffer_info = NULL;
Alexander Duyck80785292009-10-27 15:51:47 +00002283 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002284 "the receive descriptor ring\n");
2285 return -ENOMEM;
2286}
2287
2288/**
2289 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2290 * (Descriptors) for all queues
2291 * @adapter: board private structure
2292 *
2293 * Return 0 on success, negative on failure
2294 **/
2295static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2296{
Alexander Duyck439705e2009-10-27 23:49:20 +00002297 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002298 int i, err = 0;
2299
2300 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002301 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002302 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002303 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002304 "Allocation for Rx Queue %u failed\n", i);
2305 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002306 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002307 break;
2308 }
2309 }
2310
2311 return err;
2312}
2313
2314/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002315 * igb_setup_mrqc - configure the multiple receive queue control registers
2316 * @adapter: Board private structure
2317 **/
2318static void igb_setup_mrqc(struct igb_adapter *adapter)
2319{
2320 struct e1000_hw *hw = &adapter->hw;
2321 u32 mrqc, rxcsum;
2322 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2323 union e1000_reta {
2324 u32 dword;
2325 u8 bytes[4];
2326 } reta;
2327 static const u8 rsshash[40] = {
2328 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2329 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2330 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2331 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2332
2333 /* Fill out hash function seeds */
2334 for (j = 0; j < 10; j++) {
2335 u32 rsskey = rsshash[(j * 4)];
2336 rsskey |= rsshash[(j * 4) + 1] << 8;
2337 rsskey |= rsshash[(j * 4) + 2] << 16;
2338 rsskey |= rsshash[(j * 4) + 3] << 24;
2339 array_wr32(E1000_RSSRK(0), j, rsskey);
2340 }
2341
Alexander Duycka99955f2009-11-12 18:37:19 +00002342 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002343
2344 if (adapter->vfs_allocated_count) {
2345 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2346 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00002347 case e1000_82580:
2348 num_rx_queues = 1;
2349 shift = 0;
2350 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002351 case e1000_82576:
2352 shift = 3;
2353 num_rx_queues = 2;
2354 break;
2355 case e1000_82575:
2356 shift = 2;
2357 shift2 = 6;
2358 default:
2359 break;
2360 }
2361 } else {
2362 if (hw->mac.type == e1000_82575)
2363 shift = 6;
2364 }
2365
2366 for (j = 0; j < (32 * 4); j++) {
2367 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2368 if (shift2)
2369 reta.bytes[j & 3] |= num_rx_queues << shift2;
2370 if ((j & 3) == 3)
2371 wr32(E1000_RETA(j >> 2), reta.dword);
2372 }
2373
2374 /*
2375 * Disable raw packet checksumming so that RSS hash is placed in
2376 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2377 * offloads as they are enabled by default
2378 */
2379 rxcsum = rd32(E1000_RXCSUM);
2380 rxcsum |= E1000_RXCSUM_PCSD;
2381
2382 if (adapter->hw.mac.type >= e1000_82576)
2383 /* Enable Receive Checksum Offload for SCTP */
2384 rxcsum |= E1000_RXCSUM_CRCOFL;
2385
2386 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2387 wr32(E1000_RXCSUM, rxcsum);
2388
2389 /* If VMDq is enabled then we set the appropriate mode for that, else
2390 * we default to RSS so that an RSS hash is calculated per packet even
2391 * if we are only using one queue */
2392 if (adapter->vfs_allocated_count) {
2393 if (hw->mac.type > e1000_82575) {
2394 /* Set the default pool for the PF's first queue */
2395 u32 vtctl = rd32(E1000_VT_CTL);
2396 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2397 E1000_VT_CTL_DISABLE_DEF_POOL);
2398 vtctl |= adapter->vfs_allocated_count <<
2399 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2400 wr32(E1000_VT_CTL, vtctl);
2401 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002402 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002403 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2404 else
2405 mrqc = E1000_MRQC_ENABLE_VMDQ;
2406 } else {
2407 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2408 }
2409 igb_vmm_control(adapter);
2410
2411 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2412 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2413 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2414 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2415 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2416 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2417 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2418 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2419
2420 wr32(E1000_MRQC, mrqc);
2421}
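
/*
 * Worked example (not driver code): the redirection table fill in
 * igb_setup_mrqc() above. In the 82576 VMDq case (num_rx_queues = 2,
 * shift = 3), each RETA byte becomes (j % 2) << 3, so the 128 entries
 * alternate 0x00, 0x08, steering flows to queue 0 or queue 1 of the
 * default pool; four bytes are packed into each of the 32 RETA dwords
 * before being written.
 */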
2422
2423/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002424 * igb_setup_rctl - configure the receive control registers
2425 * @adapter: Board private structure
2426 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002427void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002428{
2429 struct e1000_hw *hw = &adapter->hw;
2430 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002431
2432 rctl = rd32(E1000_RCTL);
2433
2434 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002435 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002436
Alexander Duyck69d728b2008-11-25 01:04:03 -08002437 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002438 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002439
Auke Kok87cb7e82008-07-08 15:08:29 -07002440 /*
2441 * enable stripping of CRC. It's unlikely this will break BMC
2442 * redirection as it did with e1000. Newer features require
2443 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002444 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002445 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002446
Alexander Duyck559e9c42009-10-27 23:52:50 +00002447 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002448 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002449
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002450 /* enable LPE to prevent packets larger than max_frame_size */
2451 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002452
Alexander Duyck952f72a2009-10-27 15:51:07 +00002453 /* disable queue 0 to prevent tail write w/o re-config */
2454 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002455
Alexander Duycke1739522009-02-19 20:39:44 -08002456 /* Attention!!! For SR-IOV PF driver operations you must enable
2457 * queue drop for all VF and PF queues to prevent head of line blocking
2458 * if an untrusted VF does not provide descriptors to hardware.
2459 */
2460 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002461 /* set all queue drop enable bits */
2462 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002463 }
2464
Auke Kok9d5c8242008-01-24 02:22:38 -08002465 wr32(E1000_RCTL, rctl);
2466}
2467
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002468static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2469 int vfn)
2470{
2471 struct e1000_hw *hw = &adapter->hw;
2472 u32 vmolr;
2473
2474 /* if this isn't the PF, check whether the VF has VLANs enabled and,
2475 * if so, increase the size to allow for the VLAN tag */
2476 if (vfn < adapter->vfs_allocated_count &&
2477 adapter->vf_data[vfn].vlans_enabled)
2478 size += VLAN_TAG_SIZE;
2479
2480 vmolr = rd32(E1000_VMOLR(vfn));
2481 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2482 vmolr |= size | E1000_VMOLR_LPE;
2483 wr32(E1000_VMOLR(vfn), vmolr);
2484
2485 return 0;
2486}
2487
Auke Kok9d5c8242008-01-24 02:22:38 -08002488/**
Alexander Duycke1739522009-02-19 20:39:44 -08002489 * igb_rlpml_set - set maximum receive packet size
2490 * @adapter: board private structure
2491 *
2492 * Configure maximum receivable packet size.
2493 **/
2494static void igb_rlpml_set(struct igb_adapter *adapter)
2495{
2496 u32 max_frame_size = adapter->max_frame_size;
2497 struct e1000_hw *hw = &adapter->hw;
2498 u16 pf_id = adapter->vfs_allocated_count;
2499
2500 if (adapter->vlgrp)
2501 max_frame_size += VLAN_TAG_SIZE;
2502
2503 /* if VFs are enabled, set RLPML to the largest possible request
2504 * size and set the per-pool VMOLR RLPML to the size we need */
2505 if (pf_id) {
2506 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002507 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002508 }
2509
2510 wr32(E1000_RLPML, max_frame_size);
2511}
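
/*
 * Worked example (not driver code): with a 1500-byte MTU, max_frame_size
 * is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes, and a further
 * VLAN_TAG_SIZE (4) bytes are added above when a vlan group is registered,
 * so RLPML is programmed with 1522. When VFs are enabled, RLPML is instead
 * opened up to the maximum jumbo size and the per-pool VMOLR limit applies.
 */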
2512
Williams, Mitch A8151d292010-02-10 01:44:24 +00002513static inline void igb_set_vmolr(struct igb_adapter *adapter,
2514 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002515{
2516 struct e1000_hw *hw = &adapter->hw;
2517 u32 vmolr;
2518
2519 /*
2520 * This register exists only on 82576 and newer, so on older hardware
2521 * we should exit and do nothing
2522 */
2523 if (hw->mac.type < e1000_82576)
2524 return;
2525
2526 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00002527 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2528 if (aupe)
2529 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2530 else
2531 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002532
2533 /* clear all bits that might not be set */
2534 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2535
Alexander Duycka99955f2009-11-12 18:37:19 +00002536 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002537 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2538 /*
2539 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2540 * multicast packets
2541 */
2542 if (vfn <= adapter->vfs_allocated_count)
2543 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2544
2545 wr32(E1000_VMOLR(vfn), vmolr);
2546}
2547
Alexander Duycke1739522009-02-19 20:39:44 -08002548/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002549 * igb_configure_rx_ring - Configure a receive ring after Reset
2550 * @adapter: board private structure
2551 * @ring: receive ring to be configured
2552 *
2553 * Configure the Rx unit of the MAC after a reset.
2554 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002555void igb_configure_rx_ring(struct igb_adapter *adapter,
2556 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002557{
2558 struct e1000_hw *hw = &adapter->hw;
2559 u64 rdba = ring->dma;
2560 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002561 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002562
2563 /* disable the queue */
2564 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2565 wr32(E1000_RXDCTL(reg_idx),
2566 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2567
2568 /* Set DMA base address registers */
2569 wr32(E1000_RDBAL(reg_idx),
2570 rdba & 0x00000000ffffffffULL);
2571 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2572 wr32(E1000_RDLEN(reg_idx),
2573 ring->count * sizeof(union e1000_adv_rx_desc));
2574
2575 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002576 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2577 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2578 writel(0, ring->head);
2579 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002580
Alexander Duyck952f72a2009-10-27 15:51:07 +00002581 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002582 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2583 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002584 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2585#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2586 srrctl |= IGB_RXBUFFER_16384 >>
2587 E1000_SRRCTL_BSIZEPKT_SHIFT;
2588#else
2589 srrctl |= (PAGE_SIZE / 2) >>
2590 E1000_SRRCTL_BSIZEPKT_SHIFT;
2591#endif
2592 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2593 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002594 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002595 E1000_SRRCTL_BSIZEPKT_SHIFT;
2596 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2597 }
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00002598 /* Only set Drop Enable if we are supporting multiple queues */
2599 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2600 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002601
2602 wr32(E1000_SRRCTL(reg_idx), srrctl);
2603
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002604 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00002605 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002606
Alexander Duyck85b430b2009-10-27 15:50:29 +00002607 /* enable receive descriptor fetching */
2608 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2609 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2610 rxdctl &= 0xFFF00000;
2611 rxdctl |= IGB_RX_PTHRESH;
2612 rxdctl |= IGB_RX_HTHRESH << 8;
2613 rxdctl |= IGB_RX_WTHRESH << 16;
2614 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2615}
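
/*
 * Worked example (not driver code): the SRRCTL packet buffer size field
 * above is expressed in 1 KB units (assuming E1000_SRRCTL_BSIZEPKT_SHIFT
 * is the customary 10), so a 2048-byte rx_buffer_len programs
 * ALIGN(2048, 1024) >> 10 = 2 into the field, while buffers smaller than
 * IGB_RXBUFFER_1024 use the header-split layout set up in the first branch.
 */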
2616
2617/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002618 * igb_configure_rx - Configure receive Unit after Reset
2619 * @adapter: board private structure
2620 *
2621 * Configure the Rx unit of the MAC after a reset.
2622 **/
2623static void igb_configure_rx(struct igb_adapter *adapter)
2624{
Hannes Eder91075842009-02-18 19:36:04 -08002625 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002626
Alexander Duyck68d480c2009-10-05 06:33:08 +00002627 /* set UTA to appropriate mode */
2628 igb_set_uta(adapter);
2629
Alexander Duyck26ad9172009-10-05 06:32:49 +00002630 /* set the correct pool for the PF default MAC address in entry 0 */
2631 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2632 adapter->vfs_allocated_count);
2633
Alexander Duyck06cf2662009-10-27 15:53:25 +00002634 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2635 * the Base and Length of the Rx Descriptor Ring */
2636 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002637 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002638}
2639
2640/**
2641 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002642 * @tx_ring: Tx descriptor ring for a specific queue
2643 *
2644 * Free all transmit software resources
2645 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002646void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002647{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002648 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002649
2650 vfree(tx_ring->buffer_info);
2651 tx_ring->buffer_info = NULL;
2652
Alexander Duyck439705e2009-10-27 23:49:20 +00002653 /* if not set, then don't free */
2654 if (!tx_ring->desc)
2655 return;
2656
Alexander Duyck80785292009-10-27 15:51:47 +00002657 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2658 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002659
2660 tx_ring->desc = NULL;
2661}
2662
2663/**
2664 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2665 * @adapter: board private structure
2666 *
2667 * Free all transmit software resources
2668 **/
2669static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2670{
2671 int i;
2672
2673 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002674 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002675}
2676
Alexander Duyckb1a436c2009-10-27 15:54:43 +00002677void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2678 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002679{
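	/* the unmap call must mirror how the buffer was mapped in
	 * igb_tx_map_adv(): mapped_as_page records whether the mapping
	 * was created with pci_map_page() or pci_map_single() */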
Alexander Duyck6366ad32009-12-02 16:47:18 +00002680 if (buffer_info->dma) {
2681 if (buffer_info->mapped_as_page)
2682 pci_unmap_page(tx_ring->pdev,
2683 buffer_info->dma,
2684 buffer_info->length,
2685 PCI_DMA_TODEVICE);
2686 else
2687 pci_unmap_single(tx_ring->pdev,
2688 buffer_info->dma,
2689 buffer_info->length,
2690 PCI_DMA_TODEVICE);
2691 buffer_info->dma = 0;
2692 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002693 if (buffer_info->skb) {
2694 dev_kfree_skb_any(buffer_info->skb);
2695 buffer_info->skb = NULL;
2696 }
2697 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00002698 buffer_info->length = 0;
2699 buffer_info->next_to_watch = 0;
2700 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002701}
2702
2703/**
2704 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002705 * @tx_ring: ring to be cleaned
2706 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002707static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002708{
2709 struct igb_buffer *buffer_info;
2710 unsigned long size;
2711 unsigned int i;
2712
2713 if (!tx_ring->buffer_info)
2714 return;
2715 /* Free all the Tx ring sk_buffs */
2716
2717 for (i = 0; i < tx_ring->count; i++) {
2718 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002719 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002720 }
2721
2722 size = sizeof(struct igb_buffer) * tx_ring->count;
2723 memset(tx_ring->buffer_info, 0, size);
2724
2725 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08002726 memset(tx_ring->desc, 0, tx_ring->size);
2727
2728 tx_ring->next_to_use = 0;
2729 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002730}
2731
2732/**
2733 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2734 * @adapter: board private structure
2735 **/
2736static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2737{
2738 int i;
2739
2740 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002741 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002742}
2743
2744/**
2745 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002746 * @rx_ring: ring to clean the resources from
2747 *
2748 * Free all receive software resources
2749 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002750void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002751{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002752 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002753
2754 vfree(rx_ring->buffer_info);
2755 rx_ring->buffer_info = NULL;
2756
Alexander Duyck439705e2009-10-27 23:49:20 +00002757 /* if not set, then don't free */
2758 if (!rx_ring->desc)
2759 return;
2760
Alexander Duyck80785292009-10-27 15:51:47 +00002761 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2762 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002763
2764 rx_ring->desc = NULL;
2765}
2766
2767/**
2768 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2769 * @adapter: board private structure
2770 *
2771 * Free all receive software resources
2772 **/
2773static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2774{
2775 int i;
2776
2777 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002778 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002779}
2780
2781/**
2782 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002783 * @rx_ring: ring to free buffers from
2784 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002785static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002786{
2787 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002788 unsigned long size;
2789 unsigned int i;
2790
2791 if (!rx_ring->buffer_info)
2792 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00002793
Auke Kok9d5c8242008-01-24 02:22:38 -08002794 /* Free all the Rx ring sk_buffs */
2795 for (i = 0; i < rx_ring->count; i++) {
2796 buffer_info = &rx_ring->buffer_info[i];
2797 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002798 pci_unmap_single(rx_ring->pdev,
2799 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002800 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002801 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002802 buffer_info->dma = 0;
2803 }
2804
2805 if (buffer_info->skb) {
2806 dev_kfree_skb(buffer_info->skb);
2807 buffer_info->skb = NULL;
2808 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002809 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002810 pci_unmap_page(rx_ring->pdev,
2811 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002812 PAGE_SIZE / 2,
2813 PCI_DMA_FROMDEVICE);
2814 buffer_info->page_dma = 0;
2815 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002816 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002817 put_page(buffer_info->page);
2818 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002819 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002820 }
2821 }
2822
Auke Kok9d5c8242008-01-24 02:22:38 -08002823 size = sizeof(struct igb_buffer) * rx_ring->count;
2824 memset(rx_ring->buffer_info, 0, size);
2825
2826 /* Zero out the descriptor ring */
2827 memset(rx_ring->desc, 0, rx_ring->size);
2828
2829 rx_ring->next_to_clean = 0;
2830 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002831}
2832
2833/**
2834 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2835 * @adapter: board private structure
2836 **/
2837static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2838{
2839 int i;
2840
2841 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002842 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002843}
2844
2845/**
2846 * igb_set_mac - Change the Ethernet Address of the NIC
2847 * @netdev: network interface device structure
2848 * @p: pointer to an address structure
2849 *
2850 * Returns 0 on success, negative on failure
2851 **/
2852static int igb_set_mac(struct net_device *netdev, void *p)
2853{
2854 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002855 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002856 struct sockaddr *addr = p;
2857
2858 if (!is_valid_ether_addr(addr->sa_data))
2859 return -EADDRNOTAVAIL;
2860
2861 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002862 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002863
Alexander Duyck26ad9172009-10-05 06:32:49 +00002864 /* set the correct pool for the new PF MAC address in entry 0 */
2865 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2866 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002867
Auke Kok9d5c8242008-01-24 02:22:38 -08002868 return 0;
2869}
2870
2871/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002872 * igb_write_mc_addr_list - write multicast addresses to MTA
2873 * @netdev: network interface device structure
2874 *
2875 * Writes multicast address list to the MTA hash table.
2876 * Returns: -ENOMEM on failure
2877 * 0 on no addresses written
2878 * X on writing X addresses to MTA
2879 **/
2880static int igb_write_mc_addr_list(struct net_device *netdev)
2881{
2882 struct igb_adapter *adapter = netdev_priv(netdev);
2883 struct e1000_hw *hw = &adapter->hw;
2884 struct dev_mc_list *mc_ptr = netdev->mc_list;
2885 u8 *mta_list;
2886 u32 vmolr = 0;
2887 int i;
2888
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002889 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002890 /* nothing to program, so clear mc list */
2891 igb_update_mc_addr_list(hw, NULL, 0);
2892 igb_restore_vf_multicasts(adapter);
2893 return 0;
2894 }
2895
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002896	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002897 if (!mta_list)
2898 return -ENOMEM;
2899
2900 /* set vmolr receive overflow multicast bit */
2901 vmolr |= E1000_VMOLR_ROMPE;
2902
2903 /* The shared function expects a packed array of only addresses. */
2904 mc_ptr = netdev->mc_list;
2905
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002906 for (i = 0; i < netdev_mc_count(netdev); i++) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002907 if (!mc_ptr)
2908 break;
2909 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2910 mc_ptr = mc_ptr->next;
2911 }
2912 igb_update_mc_addr_list(hw, mta_list, i);
2913 kfree(mta_list);
2914
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002915 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002916}
2917
2918/**
2919 * igb_write_uc_addr_list - write unicast addresses to RAR table
2920 * @netdev: network interface device structure
2921 *
2922 * Writes unicast address list to the RAR table.
2923 * Returns: -ENOMEM on failure/insufficient address space
2924 * 0 on no addresses written
2925 * X on writing X addresses to the RAR table
2926 **/
2927static int igb_write_uc_addr_list(struct net_device *netdev)
2928{
2929 struct igb_adapter *adapter = netdev_priv(netdev);
2930 struct e1000_hw *hw = &adapter->hw;
2931 unsigned int vfn = adapter->vfs_allocated_count;
2932 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2933 int count = 0;
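	/* entry 0 holds the PF MAC and the top vfs_allocated_count RAR
	 * entries hold the VF MACs, which is why only
	 * rar_entry_count - (vfn + 1) slots are usable here */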
2934
2935 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002936 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00002937 return -ENOMEM;
2938
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002939 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002940 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002941
2942 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002943 if (!rar_entries)
2944 break;
2945 igb_rar_set_qsel(adapter, ha->addr,
2946 rar_entries--,
2947 vfn);
2948 count++;
2949 }
2950 }
2951 /* write the addresses in reverse order to avoid write combining */
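	/* clearing RAH first is what actually disables an entry, since
	 * RAH carries the Address Valid bit */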
2952 for (; rar_entries > 0 ; rar_entries--) {
2953 wr32(E1000_RAH(rar_entries), 0);
2954 wr32(E1000_RAL(rar_entries), 0);
2955 }
2956 wrfl();
2957
2958 return count;
2959}
2960
2961/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002962 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08002963 * @netdev: network interface device structure
2964 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002965 * The set_rx_mode entry point is called whenever the unicast or multicast
2966 * address lists or the network interface flags are updated. This routine is
2967 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08002968 * promiscuous mode, and all-multi behavior.
2969 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002970static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002971{
2972 struct igb_adapter *adapter = netdev_priv(netdev);
2973 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002974 unsigned int vfn = adapter->vfs_allocated_count;
2975 u32 rctl, vmolr = 0;
2976 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08002977
2978 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08002979 rctl = rd32(E1000_RCTL);
2980
Alexander Duyck68d480c2009-10-05 06:33:08 +00002981	/* clear the affected bits */
2982 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2983
Patrick McHardy746b9f02008-07-16 20:15:45 -07002984 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002985 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002986 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002987 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002988 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002989 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002990 vmolr |= E1000_VMOLR_MPME;
2991 } else {
2992 /*
2993 * Write addresses to the MTA, if the attempt fails
2994		 * then we should just turn on promiscuous mode so
2995 * that we can at least receive multicast traffic
2996 */
2997 count = igb_write_mc_addr_list(netdev);
2998 if (count < 0) {
2999 rctl |= E1000_RCTL_MPE;
3000 vmolr |= E1000_VMOLR_MPME;
3001 } else if (count) {
3002 vmolr |= E1000_VMOLR_ROMPE;
3003 }
3004 }
3005 /*
3006 * Write addresses to available RAR registers, if there is not
3007 * sufficient space to store all the addresses then enable
3008		 * unicast promiscuous mode
3009 */
3010 count = igb_write_uc_addr_list(netdev);
3011 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003012 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003013 vmolr |= E1000_VMOLR_ROPE;
3014 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003015 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003016 }
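	/* to summarize: IFF_PROMISC sets UPE+MPE, IFF_ALLMULTI sets MPE,
	 * and otherwise the MTA/RAR tables are programmed, falling back
	 * to MPE/UPE only when the tables cannot hold every address */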
Auke Kok9d5c8242008-01-24 02:22:38 -08003017 wr32(E1000_RCTL, rctl);
3018
Alexander Duyck68d480c2009-10-05 06:33:08 +00003019 /*
3020 * In order to support SR-IOV and eventually VMDq it is necessary to set
3021 * the VMOLR to enable the appropriate modes. Without this workaround
3022 * we will have issues with VLAN tag stripping not being done for frames
3023 * that are only arriving because we are the default pool
3024 */
3025 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003026 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003027
Alexander Duyck68d480c2009-10-05 06:33:08 +00003028 vmolr |= rd32(E1000_VMOLR(vfn)) &
3029 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3030 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003031 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003032}
3033
3034/* Need to wait a few seconds after link up to get diagnostic information from
3035 * the phy */
3036static void igb_update_phy_info(unsigned long data)
3037{
3038 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003039 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003040}
3041
3042/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003043 * igb_has_link - check shared code for link and determine up/down
3044 * @adapter: pointer to driver private info
3045 **/
Nick Nunley31455352010-02-17 01:01:21 +00003046bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003047{
3048 struct e1000_hw *hw = &adapter->hw;
3049 bool link_active = false;
3050 s32 ret_val = 0;
3051
3052 /* get_link_status is set on LSC (link status) interrupt or
3053	 * rx sequence error interrupt. link_active will stay
3054	 * false until e1000_check_for_link establishes link
3055 * for copper adapters ONLY
3056 */
3057 switch (hw->phy.media_type) {
3058 case e1000_media_type_copper:
3059 if (hw->mac.get_link_status) {
3060 ret_val = hw->mac.ops.check_for_link(hw);
3061 link_active = !hw->mac.get_link_status;
3062 } else {
3063 link_active = true;
3064 }
3065 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003066 case e1000_media_type_internal_serdes:
3067 ret_val = hw->mac.ops.check_for_link(hw);
3068 link_active = hw->mac.serdes_has_link;
3069 break;
3070 default:
3071 case e1000_media_type_unknown:
3072 break;
3073 }
3074
3075 return link_active;
3076}
3077
3078/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003079 * igb_watchdog - Timer Call-back
3080 * @data: pointer to adapter cast into an unsigned long
3081 **/
3082static void igb_watchdog(unsigned long data)
3083{
3084 struct igb_adapter *adapter = (struct igb_adapter *)data;
3085 /* Do the rest outside of interrupt context */
3086 schedule_work(&adapter->watchdog_task);
3087}
3088
3089static void igb_watchdog_task(struct work_struct *work)
3090{
3091 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003092 struct igb_adapter,
3093 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003094 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003095 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003096 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003097 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003098
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003099 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003100 if (link) {
3101 if (!netif_carrier_ok(netdev)) {
3102 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003103 hw->mac.ops.get_speed_and_duplex(hw,
3104 &adapter->link_speed,
3105 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003106
3107 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003108			/* Link status message must follow this format */
3109 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003110 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003111 netdev->name,
3112 adapter->link_speed,
3113 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003114 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003115 ((ctrl & E1000_CTRL_TFCE) &&
3116 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3117 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3118 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003119
3120 /* tweak tx_queue_len according to speed/duplex and
3121 * adjust the timeout factor */
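			/* a shorter queue at low speed keeps stale packets
			 * from sitting in the qdisc, while the larger
			 * timeout factor gives slow links longer to drain
			 * before a Tx hang is declared */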
3122 netdev->tx_queue_len = adapter->tx_queue_len;
3123 adapter->tx_timeout_factor = 1;
3124 switch (adapter->link_speed) {
3125 case SPEED_10:
3126 netdev->tx_queue_len = 10;
3127 adapter->tx_timeout_factor = 14;
3128 break;
3129 case SPEED_100:
3130 netdev->tx_queue_len = 100;
3131 /* maybe add some timeout factor ? */
3132 break;
3133 }
3134
3135 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003136
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003137 igb_ping_all_vfs(adapter);
3138
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003139 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003140 if (!test_bit(__IGB_DOWN, &adapter->state))
3141 mod_timer(&adapter->phy_info_timer,
3142 round_jiffies(jiffies + 2 * HZ));
3143 }
3144 } else {
3145 if (netif_carrier_ok(netdev)) {
3146 adapter->link_speed = 0;
3147 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003148			/* Link status message must follow this format */
3149 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3150 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003151 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003152
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003153 igb_ping_all_vfs(adapter);
3154
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003155 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003156 if (!test_bit(__IGB_DOWN, &adapter->state))
3157 mod_timer(&adapter->phy_info_timer,
3158 round_jiffies(jiffies + 2 * HZ));
3159 }
3160 }
3161
Auke Kok9d5c8242008-01-24 02:22:38 -08003162 igb_update_stats(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003163
Alexander Duyckdbabb062009-11-12 18:38:16 +00003164 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003165 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003166 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003167 /* We've lost link, so the controller stops DMA,
3168 * but we've got queued Tx work that's never going
3169 * to get done, so reset controller to flush Tx.
3170 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003171 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3172 adapter->tx_timeout_count++;
3173 schedule_work(&adapter->reset_task);
3174 /* return immediately since reset is imminent */
3175 return;
3176 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003177 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003178
Alexander Duyckdbabb062009-11-12 18:38:16 +00003179 /* Force detection of hung controller every watchdog period */
3180 tx_ring->detect_tx_hung = true;
3181 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003182
Auke Kok9d5c8242008-01-24 02:22:38 -08003183 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003184 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003185 u32 eics = 0;
3186 for (i = 0; i < adapter->num_q_vectors; i++) {
3187 struct igb_q_vector *q_vector = adapter->q_vector[i];
3188 eics |= q_vector->eims_value;
3189 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003190 wr32(E1000_EICS, eics);
3191 } else {
3192 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3193 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003194
Auke Kok9d5c8242008-01-24 02:22:38 -08003195 /* Reset the timer */
3196 if (!test_bit(__IGB_DOWN, &adapter->state))
3197 mod_timer(&adapter->watchdog_timer,
3198 round_jiffies(jiffies + 2 * HZ));
3199}
3200
3201enum latency_range {
3202 lowest_latency = 0,
3203 low_latency = 1,
3204 bulk_latency = 2,
3205 latency_invalid = 255
3206};
3207
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003208/**
3209 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3210 *
3211 * Stores a new ITR value based strictly on packet size. This
3212 * algorithm is less sophisticated than that used in igb_update_itr,
3213 * due to the difficulty of synchronizing statistics across multiple
3214 * receive rings. The divisors and thresholds used by this function
3215 * were determined based on theoretical maximum wire speed and testing
3216 * data, in order to minimize response time while increasing bulk
3217 * throughput.
3218 * This functionality is controlled by the InterruptThrottleRate module
3219 * parameter (see igb_param.c)
3220 * NOTE: This function is called only when operating in a multiqueue
3221 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003222 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003223 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003224static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003225{
Alexander Duyck047e0032009-10-27 15:49:27 +00003226 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003227 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003228 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -08003229
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003230 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3231	 * ints/sec - ITR timer value of 976 ticks.
3232 */
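	/* itr_val is written to EITR, which counts in roughly 256 ns
	 * units: 976 is ~250 usec (~4000 ints/sec) and the 196 floor
	 * used for itr mode 3 below is ~50 usec (~20000 ints/sec) */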
3233 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003234 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003235 goto set_itr_val;
3236 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003237
3238 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3239 struct igb_ring *ring = q_vector->rx_ring;
3240 avg_wire_size = ring->total_bytes / ring->total_packets;
3241 }
3242
3243 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3244 struct igb_ring *ring = q_vector->tx_ring;
3245 avg_wire_size = max_t(u32, avg_wire_size,
3246 (ring->total_bytes /
3247 ring->total_packets));
3248 }
3249
3250 /* if avg_wire_size isn't set no work was done */
3251 if (!avg_wire_size)
3252 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003253
3254 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3255 avg_wire_size += 24;
3256
3257 /* Don't starve jumbo frames */
3258 avg_wire_size = min(avg_wire_size, 3000);
3259
3260 /* Give a little boost to mid-size frames */
3261 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3262 new_val = avg_wire_size / 3;
3263 else
3264 new_val = avg_wire_size / 2;
3265
Nick Nunleyabe1c362010-02-17 01:03:19 +00003266 /* when in itr mode 3 do not exceed 20K ints/sec */
3267 if (adapter->rx_itr_setting == 3 && new_val < 196)
3268 new_val = 196;
3269
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003270set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003271 if (new_val != q_vector->itr_val) {
3272 q_vector->itr_val = new_val;
3273 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003274 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003275clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003276 if (q_vector->rx_ring) {
3277 q_vector->rx_ring->total_bytes = 0;
3278 q_vector->rx_ring->total_packets = 0;
3279 }
3280 if (q_vector->tx_ring) {
3281 q_vector->tx_ring->total_bytes = 0;
3282 q_vector->tx_ring->total_packets = 0;
3283 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003284}
3285
3286/**
3287 * igb_update_itr - update the dynamic ITR value based on statistics
3288 * Stores a new ITR value based on packets and byte
3289 * counts during the last interrupt. The advantage of per interrupt
3290 * computation is faster updates and more accurate ITR for the current
3291 * traffic pattern. Constants in this function were computed
3292 * based on theoretical maximum wire speed and thresholds were set based
3293 * on testing data as well as attempting to minimize response time
3294 * while increasing bulk throughput.
3295 * This functionality is controlled by the InterruptThrottleRate module
3296 * parameter (see igb_param.c)
3297 * NOTE: These calculations are only valid when operating in a single-
3298 * queue environment.
3299 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003300 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003301 * @packets: the number of packets during this measurement interval
3302 * @bytes: the number of bytes during this measurement interval
3303 **/
3304static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3305 int packets, int bytes)
3306{
3307 unsigned int retval = itr_setting;
3308
3309 if (packets == 0)
3310 goto update_itr_done;
3311
3312 switch (itr_setting) {
3313 case lowest_latency:
3314 /* handle TSO and jumbo frames */
3315 if (bytes/packets > 8000)
3316 retval = bulk_latency;
3317 else if ((packets < 5) && (bytes > 512))
3318 retval = low_latency;
3319 break;
3320 case low_latency: /* 50 usec aka 20000 ints/s */
3321 if (bytes > 10000) {
3322 /* this if handles the TSO accounting */
3323 if (bytes/packets > 8000) {
3324 retval = bulk_latency;
3325 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3326 retval = bulk_latency;
3327			} else if (packets > 35) {
3328 retval = lowest_latency;
3329 }
3330 } else if (bytes/packets > 2000) {
3331 retval = bulk_latency;
3332 } else if (packets <= 2 && bytes < 512) {
3333 retval = lowest_latency;
3334 }
3335 break;
3336 case bulk_latency: /* 250 usec aka 4000 ints/s */
3337 if (bytes > 25000) {
3338 if (packets > 35)
3339 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003340 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003341 retval = low_latency;
3342 }
3343 break;
3344 }
3345
3346update_itr_done:
3347 return retval;
3348}
3349
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003350static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003351{
Alexander Duyck047e0032009-10-27 15:49:27 +00003352 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003353 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003354 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003355
3356 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3357 if (adapter->link_speed != SPEED_1000) {
3358 current_itr = 0;
3359 new_itr = 4000;
3360 goto set_itr_now;
3361 }
3362
3363 adapter->rx_itr = igb_update_itr(adapter,
3364 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003365 q_vector->rx_ring->total_packets,
3366 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003367
Alexander Duyck047e0032009-10-27 15:49:27 +00003368 adapter->tx_itr = igb_update_itr(adapter,
3369 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003370 q_vector->tx_ring->total_packets,
3371 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003372 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003373
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003374 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003375 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003376 current_itr = low_latency;
3377
Auke Kok9d5c8242008-01-24 02:22:38 -08003378 switch (current_itr) {
3379 /* counts and packets in update_itr are dependent on these numbers */
3380 case lowest_latency:
Alexander Duyck78b1f602009-04-23 11:20:29 +00003381 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003382 break;
3383 case low_latency:
Alexander Duyck78b1f602009-04-23 11:20:29 +00003384 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003385 break;
3386 case bulk_latency:
Alexander Duyck78b1f602009-04-23 11:20:29 +00003387 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003388 break;
3389 default:
3390 break;
3391 }
3392
3393set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003394 q_vector->rx_ring->total_bytes = 0;
3395 q_vector->rx_ring->total_packets = 0;
3396 q_vector->tx_ring->total_bytes = 0;
3397 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003398
Alexander Duyck047e0032009-10-27 15:49:27 +00003399 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003400 /* this attempts to bias the interrupt rate towards Bulk
3401 * by adding intermediate steps when interrupt rate is
3402 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003403 new_itr = new_itr > q_vector->itr_val ?
3404 max((new_itr * q_vector->itr_val) /
3405 (new_itr + (q_vector->itr_val >> 2)),
3406 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003407 new_itr;
3408 /* Don't write the value here; it resets the adapter's
3409 * internal timer, and causes us to delay far longer than
3410 * we should between interrupts. Instead, we write the ITR
3411 * value at the beginning of the next interrupt so the timing
3412 * ends up being correct.
3413 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003414 q_vector->itr_val = new_itr;
3415 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003416 }
3419}
3420
Auke Kok9d5c8242008-01-24 02:22:38 -08003421#define IGB_TX_FLAGS_CSUM 0x00000001
3422#define IGB_TX_FLAGS_VLAN 0x00000002
3423#define IGB_TX_FLAGS_TSO 0x00000004
3424#define IGB_TX_FLAGS_IPV4 0x00000008
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003425#define IGB_TX_FLAGS_TSTAMP 0x00000010
3426#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3427#define IGB_TX_FLAGS_VLAN_SHIFT 16
Auke Kok9d5c8242008-01-24 02:22:38 -08003428
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003429static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003430 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3431{
3432 struct e1000_adv_tx_context_desc *context_desc;
3433 unsigned int i;
3434 int err;
3435 struct igb_buffer *buffer_info;
3436 u32 info = 0, tu_cmd = 0;
3437 u32 mss_l4len_idx, l4len;
3438 *hdr_len = 0;
3439
3440 if (skb_header_cloned(skb)) {
3441 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3442 if (err)
3443 return err;
3444 }
3445
3446 l4len = tcp_hdrlen(skb);
3447 *hdr_len += l4len;
3448
3449 if (skb->protocol == htons(ETH_P_IP)) {
3450 struct iphdr *iph = ip_hdr(skb);
3451 iph->tot_len = 0;
3452 iph->check = 0;
3453 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3454 iph->daddr, 0,
3455 IPPROTO_TCP,
3456 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003457 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003458 ipv6_hdr(skb)->payload_len = 0;
3459 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3460 &ipv6_hdr(skb)->daddr,
3461 0, IPPROTO_TCP, 0);
3462 }
3463
3464 i = tx_ring->next_to_use;
3465
3466 buffer_info = &tx_ring->buffer_info[i];
3467 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3468 /* VLAN MACLEN IPLEN */
3469 if (tx_flags & IGB_TX_FLAGS_VLAN)
3470 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3471 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3472 *hdr_len += skb_network_offset(skb);
3473 info |= skb_network_header_len(skb);
3474 *hdr_len += skb_network_header_len(skb);
3475 context_desc->vlan_macip_lens = cpu_to_le32(info);
3476
3477 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3478 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3479
3480 if (skb->protocol == htons(ETH_P_IP))
3481 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3482 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3483
3484 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3485
3486 /* MSS L4LEN IDX */
3487 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3488 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3489
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003490 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003491 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3492 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003493
3494 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3495 context_desc->seqnum_seed = 0;
3496
3497 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003498 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003499 buffer_info->dma = 0;
3500 i++;
3501 if (i == tx_ring->count)
3502 i = 0;
3503
3504 tx_ring->next_to_use = i;
3505
3506 return true;
3507}
3508
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003509static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3510 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003511{
3512 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck80785292009-10-27 15:51:47 +00003513 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003514 struct igb_buffer *buffer_info;
3515 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003516 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003517
3518 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3519 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3520 i = tx_ring->next_to_use;
3521 buffer_info = &tx_ring->buffer_info[i];
3522 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3523
3524 if (tx_flags & IGB_TX_FLAGS_VLAN)
3525 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003526
Auke Kok9d5c8242008-01-24 02:22:38 -08003527 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3528 if (skb->ip_summed == CHECKSUM_PARTIAL)
3529 info |= skb_network_header_len(skb);
3530
3531 context_desc->vlan_macip_lens = cpu_to_le32(info);
3532
3533 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3534
3535 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003536 __be16 protocol;
3537
3538 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3539 const struct vlan_ethhdr *vhdr =
3540 (const struct vlan_ethhdr*)skb->data;
3541
3542 protocol = vhdr->h_vlan_encapsulated_proto;
3543 } else {
3544 protocol = skb->protocol;
3545 }
3546
3547 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003548 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003549 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003550 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3551 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003552 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3553 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003554 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003555 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003556 /* XXX what about other V6 headers?? */
3557 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3558 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003559 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3560 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003561 break;
3562 default:
3563 if (unlikely(net_ratelimit()))
Alexander Duyck80785292009-10-27 15:51:47 +00003564 dev_warn(&pdev->dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003565 "partial checksum but proto=%x!\n",
3566 skb->protocol);
3567 break;
3568 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003569 }
3570
3571 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3572 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003573 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003574 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003575 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003576
3577 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003578 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003579 buffer_info->dma = 0;
3580
3581 i++;
3582 if (i == tx_ring->count)
3583 i = 0;
3584 tx_ring->next_to_use = i;
3585
3586 return true;
3587 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003588 return false;
3589}
3590
3591#define IGB_MAX_TXD_PWR 16
3592#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
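/* each advanced data descriptor carries at most 64 KB (1 << 16) of
 * data, which the BUG_ON() length checks in igb_tx_map_adv() enforce */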
3593
Alexander Duyck80785292009-10-27 15:51:47 +00003594static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003595 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003596{
3597 struct igb_buffer *buffer_info;
Alexander Duyck80785292009-10-27 15:51:47 +00003598 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003599 unsigned int len = skb_headlen(skb);
3600 unsigned int count = 0, i;
3601 unsigned int f;
3602
3603 i = tx_ring->next_to_use;
3604
3605 buffer_info = &tx_ring->buffer_info[i];
3606 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3607 buffer_info->length = len;
3608 /* set time_stamp *before* dma to help avoid a possible race */
3609 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003610 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003611 buffer_info->dma = pci_map_single(pdev, skb->data, len,
3612 PCI_DMA_TODEVICE);
3613 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3614 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08003615
3616 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3617 struct skb_frag_struct *frag;
3618
Alexander Duyck85811452010-01-23 01:35:00 -08003619 count++;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003620 i++;
3621 if (i == tx_ring->count)
3622 i = 0;
3623
Auke Kok9d5c8242008-01-24 02:22:38 -08003624 frag = &skb_shinfo(skb)->frags[f];
3625 len = frag->size;
3626
3627 buffer_info = &tx_ring->buffer_info[i];
3628 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3629 buffer_info->length = len;
3630 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003631 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003632 buffer_info->mapped_as_page = true;
3633 buffer_info->dma = pci_map_page(pdev,
3634 frag->page,
3635 frag->page_offset,
3636 len,
3637 PCI_DMA_TODEVICE);
3638 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3639 goto dma_error;
3640
Auke Kok9d5c8242008-01-24 02:22:38 -08003641 }
3642
Auke Kok9d5c8242008-01-24 02:22:38 -08003643 tx_ring->buffer_info[i].skb = skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003644 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003645
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003646 return ++count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003647
3648dma_error:
3649 dev_err(&pdev->dev, "TX DMA map failed\n");
3650
3651 /* clear timestamp and dma mappings for failed buffer_info mapping */
3652 buffer_info->dma = 0;
3653 buffer_info->time_stamp = 0;
3654 buffer_info->length = 0;
3655 buffer_info->next_to_watch = 0;
3656 buffer_info->mapped_as_page = false;
3657 count--;
3658
3659 /* clear timestamp and dma mappings for remaining portion of packet */
3660 while (count >= 0) {
3661 count--;
3662 i--;
3663 if (i < 0)
3664 i += tx_ring->count;
3665 buffer_info = &tx_ring->buffer_info[i];
3666 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3667 }
3668
3669 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003670}
3671
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003672static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003673 int tx_flags, int count, u32 paylen,
3674 u8 hdr_len)
3675{
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003676 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003677 struct igb_buffer *buffer_info;
3678 u32 olinfo_status = 0, cmd_type_len;
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003679 unsigned int i = tx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08003680
3681 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3682 E1000_ADVTXD_DCMD_DEXT);
3683
3684 if (tx_flags & IGB_TX_FLAGS_VLAN)
3685 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3686
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003687 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3688 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3689
Auke Kok9d5c8242008-01-24 02:22:38 -08003690 if (tx_flags & IGB_TX_FLAGS_TSO) {
3691 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3692
3693 /* insert tcp checksum */
3694 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3695
3696 /* insert ip checksum */
3697 if (tx_flags & IGB_TX_FLAGS_IPV4)
3698 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3699
3700 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3701 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3702 }
3703
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003704 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3705 (tx_flags & (IGB_TX_FLAGS_CSUM |
3706 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003707 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003708 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003709
3710 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
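	/* PAYLEN is the payload past the headers: hdr_len is non-zero
	 * only for TSO, so for non-TSO frames this is just skb->len */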
3711
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003712 do {
Auke Kok9d5c8242008-01-24 02:22:38 -08003713 buffer_info = &tx_ring->buffer_info[i];
3714 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3715 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3716 tx_desc->read.cmd_type_len =
3717 cpu_to_le32(cmd_type_len | buffer_info->length);
3718 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003719 count--;
Auke Kok9d5c8242008-01-24 02:22:38 -08003720 i++;
3721 if (i == tx_ring->count)
3722 i = 0;
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003723 } while (count > 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003724
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003725 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08003726 /* Force memory writes to complete before letting h/w
3727 * know there are new descriptors to fetch. (Only
3728 * applicable for weak-ordered memory model archs,
3729 * such as IA-64). */
3730 wmb();
3731
3732 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00003733 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08003734 /* we need this if more than one processor can write to our tail
3735	 * at a time, it synchronizes IO on IA64/Altix systems */
3736 mmiowb();
3737}
3738
Alexander Duycke694e962009-10-27 15:53:06 +00003739static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003740{
Alexander Duycke694e962009-10-27 15:53:06 +00003741 struct net_device *netdev = tx_ring->netdev;
3742
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003743 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003744
Auke Kok9d5c8242008-01-24 02:22:38 -08003745 /* Herbert's original patch had:
3746 * smp_mb__after_netif_stop_queue();
3747 * but since that doesn't exist yet, just open code it. */
3748 smp_mb();
3749
3750 /* We need to check again in a case another CPU has just
3751 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00003752 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003753 return -EBUSY;
3754
3755 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003756 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fca2009-10-27 15:52:27 +00003757 tx_ring->tx_stats.restart_queue++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003758 return 0;
3759}
3760
Alexander Duycke694e962009-10-27 15:53:06 +00003761static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003762{
Alexander Duyckc493ea42009-03-20 00:16:50 +00003763 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003764 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00003765 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003766}
3767
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003768netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3769 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003770{
Alexander Duycke694e962009-10-27 15:53:06 +00003771 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003772 unsigned int first;
Auke Kok9d5c8242008-01-24 02:22:38 -08003773 unsigned int tx_flags = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003774 u8 hdr_len = 0;
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003775 int tso = 0, count;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00003776 union skb_shared_tx *shtx = skb_tx(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08003777
Auke Kok9d5c8242008-01-24 02:22:38 -08003778 /* need: 1 descriptor per page,
3779 * + 2 desc gap to keep tail from touching head,
3780 * + 1 desc for skb->data,
3781 * + 1 desc for context descriptor,
3782 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00003783 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003784 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08003785 return NETDEV_TX_BUSY;
3786 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003787
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003788 if (unlikely(shtx->hardware)) {
3789 shtx->in_progress = 1;
3790 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003791 }
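	/* the stack requested hardware Tx timestamping (SO_TIMESTAMPING);
	 * in_progress marks the request outstanding and the TSTAMP flag
	 * makes igb_tx_queue_adv() set E1000_ADVTXD_MAC_TSTAMP in the
	 * descriptor */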
Auke Kok9d5c8242008-01-24 02:22:38 -08003792
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003793 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003794 tx_flags |= IGB_TX_FLAGS_VLAN;
3795 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3796 }
3797
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003798 if (skb->protocol == htons(ETH_P_IP))
3799 tx_flags |= IGB_TX_FLAGS_IPV4;
3800
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003801 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003802 if (skb_is_gso(skb)) {
3803 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003804
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003805 if (tso < 0) {
3806 dev_kfree_skb_any(skb);
3807 return NETDEV_TX_OK;
3808 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003809 }
3810
3811 if (tso)
3812 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003813 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00003814 (skb->ip_summed == CHECKSUM_PARTIAL))
3815 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003816
Alexander Duyck65689fe2009-03-20 00:17:43 +00003817 /*
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003818	 * count reflects descriptors mapped; if 0 or less, a mapping error
Alexander Duyck65689fe2009-03-20 00:17:43 +00003819	 * has occurred and we need to rewind the descriptor queue
3820 */
Alexander Duyck80785292009-10-27 15:51:47 +00003821 count = igb_tx_map_adv(tx_ring, skb, first);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003822 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00003823 dev_kfree_skb_any(skb);
3824 tx_ring->buffer_info[first].time_stamp = 0;
3825 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003826 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003827 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003828
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003829 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3830
3831 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00003832 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003833
Auke Kok9d5c8242008-01-24 02:22:38 -08003834 return NETDEV_TX_OK;
3835}
3836
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003837static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3838 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003839{
3840 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003841 struct igb_ring *tx_ring;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003842 int r_idx = 0;
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003843
3844 if (test_bit(__IGB_DOWN, &adapter->state)) {
3845 dev_kfree_skb_any(skb);
3846 return NETDEV_TX_OK;
3847 }
3848
3849 if (skb->len <= 0) {
3850 dev_kfree_skb_any(skb);
3851 return NETDEV_TX_OK;
3852 }
3853
Alexander Duyck1bfaf072009-02-19 20:39:23 -08003854 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003855 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08003856
3857 /* This goes back to the question of how to logically map a tx queue
3858 * to a flow. Right now, performance is impacted slightly negatively
3859 * if using multiple tx queues. If the stack breaks away from a
3860 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00003861 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003862}
3863
3864/**
3865 * igb_tx_timeout - Respond to a Tx Hang
3866 * @netdev: network interface device structure
3867 **/
3868static void igb_tx_timeout(struct net_device *netdev)
3869{
3870 struct igb_adapter *adapter = netdev_priv(netdev);
3871 struct e1000_hw *hw = &adapter->hw;
3872
3873 /* Do the reset outside of interrupt context */
3874 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003875
Alexander Duyck55cac242009-11-19 12:42:21 +00003876 if (hw->mac.type == e1000_82580)
3877 hw->dev_spec._82575.global_device_reset = true;
3878
Auke Kok9d5c8242008-01-24 02:22:38 -08003879 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00003880 wr32(E1000_EICS,
3881 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08003882}
3883
3884static void igb_reset_task(struct work_struct *work)
3885{
3886 struct igb_adapter *adapter;
3887 adapter = container_of(work, struct igb_adapter, reset_task);
3888
3889 igb_reinit_locked(adapter);
3890}
3891
3892/**
3893 * igb_get_stats - Get System Network Statistics
3894 * @netdev: network interface device structure
3895 *
3896 * Returns the address of the device statistics structure.
3897 * The statistics are actually updated from the timer callback.
3898 **/
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003899static struct net_device_stats *igb_get_stats(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003900{
Auke Kok9d5c8242008-01-24 02:22:38 -08003901 /* only return the current stats */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003902 return &netdev->stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08003903}
3904
3905/**
3906 * igb_change_mtu - Change the Maximum Transfer Unit
3907 * @netdev: network interface device structure
3908 * @new_mtu: new value for maximum frame size
3909 *
3910 * Returns 0 on success, negative on failure
3911 **/
3912static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3913{
3914 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00003915 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003916 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00003917 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003918
Alexander Duyckc809d222009-10-27 23:52:13 +00003919 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003920 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003921 return -EINVAL;
3922 }
3923
Auke Kok9d5c8242008-01-24 02:22:38 -08003924 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003925 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003926 return -EINVAL;
3927 }
3928
3929 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3930 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003931
Auke Kok9d5c8242008-01-24 02:22:38 -08003932 /* igb_down has a dependency on max_frame_size */
3933 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00003934
Auke Kok9d5c8242008-01-24 02:22:38 -08003935 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3936	 * means we reserve 2 more; this pushes us to allocate from the next
3937 * larger slab size.
3938 * i.e. RXBUFFER_2048 --> size-4096 slab
3939 */
3940
Alexander Duyck7d95b712009-10-27 15:50:08 +00003941 if (max_frame <= IGB_RXBUFFER_1024)
Alexander Duyck4c844852009-10-27 15:52:07 +00003942 rx_buffer_len = IGB_RXBUFFER_1024;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003943 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
Alexander Duyck4c844852009-10-27 15:52:07 +00003944 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003945 else
Alexander Duyck4c844852009-10-27 15:52:07 +00003946 rx_buffer_len = IGB_RXBUFFER_128;
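	/* jumbo frames fall back to the 128-byte buffer: the payload is
	 * then carried in half-page buffers via the header-split path
	 * configured in igb_configure_rx_ring() */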
3947
3948 if (netif_running(netdev))
3949 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003950
Alexander Duyck090b1792009-10-27 23:51:55 +00003951 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08003952 netdev->mtu, new_mtu);
3953 netdev->mtu = new_mtu;
3954
Alexander Duyck4c844852009-10-27 15:52:07 +00003955 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003956 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
Alexander Duyck4c844852009-10-27 15:52:07 +00003957
Auke Kok9d5c8242008-01-24 02:22:38 -08003958 if (netif_running(netdev))
3959 igb_up(adapter);
3960 else
3961 igb_reset(adapter);
3962
3963 clear_bit(__IGB_RESETTING, &adapter->state);
3964
3965 return 0;
3966}
3967
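/*
 * Two sources feed the OS-visible statistics below: per-ring software
 * counters (bytes/packets/drops accumulated in the hot paths) and the
 * hardware statistics registers, which are clear-on-read and are
 * therefore accumulated into adapter->stats on every pass through here.
 */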
3968/**
3969 * igb_update_stats - Update the board statistics counters
3970 * @adapter: board private structure
3971 **/
3972
3973void igb_update_stats(struct igb_adapter *adapter)
3974{
Alexander Duyck128e45e2009-11-12 18:37:38 +00003975 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003976 struct e1000_hw *hw = &adapter->hw;
3977 struct pci_dev *pdev = adapter->pdev;
Nick Nunley43915c7c2010-02-17 01:03:58 +00003978 u32 rnbc, reg;
Auke Kok9d5c8242008-01-24 02:22:38 -08003979 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003980 int i;
3981 u64 bytes, packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003982
3983#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3984
3985 /*
3986 * Prevent stats update while adapter is being reset, or if the pci
3987 * connection is down.
3988 */
3989 if (adapter->link_speed == 0)
3990 return;
3991 if (pci_channel_offline(pdev))
3992 return;
3993
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003994 bytes = 0;
3995 packets = 0;
3996 for (i = 0; i < adapter->num_rx_queues; i++) {
3997 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00003998 struct igb_ring *ring = adapter->rx_ring[i];
3999 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004000 net_stats->rx_fifo_errors += rqdpc_tmp;
Alexander Duyck3025a442010-02-17 01:02:39 +00004001 bytes += ring->rx_stats.bytes;
4002 packets += ring->rx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004003 }
4004
Alexander Duyck128e45e2009-11-12 18:37:38 +00004005 net_stats->rx_bytes = bytes;
4006 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004007
4008 bytes = 0;
4009 packets = 0;
4010 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004011 struct igb_ring *ring = adapter->tx_ring[i];
4012 bytes += ring->tx_stats.bytes;
4013 packets += ring->tx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004014 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004015 net_stats->tx_bytes = bytes;
4016 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004017
4018 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004019 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4020 adapter->stats.gprc += rd32(E1000_GPRC);
4021 adapter->stats.gorc += rd32(E1000_GORCL);
4022 rd32(E1000_GORCH); /* clear GORCL */
4023 adapter->stats.bprc += rd32(E1000_BPRC);
4024 adapter->stats.mprc += rd32(E1000_MPRC);
4025 adapter->stats.roc += rd32(E1000_ROC);
4026
4027 adapter->stats.prc64 += rd32(E1000_PRC64);
4028 adapter->stats.prc127 += rd32(E1000_PRC127);
4029 adapter->stats.prc255 += rd32(E1000_PRC255);
4030 adapter->stats.prc511 += rd32(E1000_PRC511);
4031 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4032 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4033 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4034 adapter->stats.sec += rd32(E1000_SEC);
4035
4036 adapter->stats.mpc += rd32(E1000_MPC);
4037 adapter->stats.scc += rd32(E1000_SCC);
4038 adapter->stats.ecol += rd32(E1000_ECOL);
4039 adapter->stats.mcc += rd32(E1000_MCC);
4040 adapter->stats.latecol += rd32(E1000_LATECOL);
4041 adapter->stats.dc += rd32(E1000_DC);
4042 adapter->stats.rlec += rd32(E1000_RLEC);
4043 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4044 adapter->stats.xontxc += rd32(E1000_XONTXC);
4045 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4046 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4047 adapter->stats.fcruc += rd32(E1000_FCRUC);
4048 adapter->stats.gptc += rd32(E1000_GPTC);
4049 adapter->stats.gotc += rd32(E1000_GOTCL);
4050 rd32(E1000_GOTCH); /* clear GOTCL */
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004051 rnbc = rd32(E1000_RNBC);
4052 adapter->stats.rnbc += rnbc;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004053 net_stats->rx_fifo_errors += rnbc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004054 adapter->stats.ruc += rd32(E1000_RUC);
4055 adapter->stats.rfc += rd32(E1000_RFC);
4056 adapter->stats.rjc += rd32(E1000_RJC);
4057 adapter->stats.tor += rd32(E1000_TORH);
4058 adapter->stats.tot += rd32(E1000_TOTH);
4059 adapter->stats.tpr += rd32(E1000_TPR);
4060
4061 adapter->stats.ptc64 += rd32(E1000_PTC64);
4062 adapter->stats.ptc127 += rd32(E1000_PTC127);
4063 adapter->stats.ptc255 += rd32(E1000_PTC255);
4064 adapter->stats.ptc511 += rd32(E1000_PTC511);
4065 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4066 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4067
4068 adapter->stats.mptc += rd32(E1000_MPTC);
4069 adapter->stats.bptc += rd32(E1000_BPTC);
4070
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004071 adapter->stats.tpt += rd32(E1000_TPT);
4072 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004073
4074 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004075 /* read internal phy specific stats */
4076 reg = rd32(E1000_CTRL_EXT);
4077 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4078 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4079 adapter->stats.tncrs += rd32(E1000_TNCRS);
4080 }
4081
Auke Kok9d5c8242008-01-24 02:22:38 -08004082 adapter->stats.tsctc += rd32(E1000_TSCTC);
4083 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4084
4085 adapter->stats.iac += rd32(E1000_IAC);
4086 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4087 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4088 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4089 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4090 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4091 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4092 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4093 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4094
4095 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004096 net_stats->multicast = adapter->stats.mprc;
4097 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004098
4099 /* Rx Errors */
4100
 4101	/* RLEC on some newer hardware can be incorrect, so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004102 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004103 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004104 adapter->stats.crcerrs + adapter->stats.algnerrc +
4105 adapter->stats.ruc + adapter->stats.roc +
4106 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004107 net_stats->rx_length_errors = adapter->stats.ruc +
4108 adapter->stats.roc;
4109 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4110 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4111 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004112
4113 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004114 net_stats->tx_errors = adapter->stats.ecol +
4115 adapter->stats.latecol;
4116 net_stats->tx_aborted_errors = adapter->stats.ecol;
4117 net_stats->tx_window_errors = adapter->stats.latecol;
4118 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004119
4120 /* Tx Dropped needs to be maintained elsewhere */
4121
4122 /* Phy Stats */
4123 if (hw->phy.media_type == e1000_media_type_copper) {
4124 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004125 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004126 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4127 adapter->phy_stats.idle_errors += phy_tmp;
4128 }
4129 }
4130
4131 /* Management Stats */
4132 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4133 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4134 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4135}
4136
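/*
 * MSI-X "other" vector: everything that is not ring traffic lands here.
 * Device-reset requests (DRSTA) kick the reset task, DMA out-of-sync
 * events (DOUTSYNC) are only counted, mailbox interrupts (VMMB) run the
 * VF message task, and link-state changes (LSC) poke the watchdog. The
 * per-ring work is left to igb_msix_ring() below.
 */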
Auke Kok9d5c8242008-01-24 02:22:38 -08004137static irqreturn_t igb_msix_other(int irq, void *data)
4138{
Alexander Duyck047e0032009-10-27 15:49:27 +00004139 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004140 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004141 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004142 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004143
Alexander Duyck7f081d42010-01-07 17:41:00 +00004144 if (icr & E1000_ICR_DRSTA)
4145 schedule_work(&adapter->reset_task);
4146
Alexander Duyck047e0032009-10-27 15:49:27 +00004147 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004148 /* HW is reporting DMA is out of sync */
4149 adapter->stats.doosync++;
4150 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004151
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004152 /* Check for a mailbox event */
4153 if (icr & E1000_ICR_VMMB)
4154 igb_msg_task(adapter);
4155
4156 if (icr & E1000_ICR_LSC) {
4157 hw->mac.get_link_status = 1;
4158 /* guard against interrupt when we're going down */
4159 if (!test_bit(__IGB_DOWN, &adapter->state))
4160 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4161 }
4162
Alexander Duyck25568a52009-10-27 23:49:59 +00004163 if (adapter->vfs_allocated_count)
4164 wr32(E1000_IMS, E1000_IMS_LSC |
4165 E1000_IMS_VMMB |
4166 E1000_IMS_DOUTSYNC);
4167 else
4168 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004169 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004170
4171 return IRQ_HANDLED;
4172}
4173
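/*
 * Push the most recently calculated ITR value out to this vector's EITR
 * register. The value is masked to the interval field (0x7FFC); on the
 * 82575 the interval is mirrored into both halves of the register,
 * while newer parts get a mode bit OR'd in instead (this summarizes the
 * code below, not the datasheet).
 */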
Alexander Duyck047e0032009-10-27 15:49:27 +00004174static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004175{
Alexander Duyck26b39272010-02-17 01:00:41 +00004176 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004177 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004178
Alexander Duyck047e0032009-10-27 15:49:27 +00004179 if (!q_vector->set_itr)
4180 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004181
Alexander Duyck047e0032009-10-27 15:49:27 +00004182 if (!itr_val)
4183 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004184
Alexander Duyck26b39272010-02-17 01:00:41 +00004185 if (adapter->hw.mac.type == e1000_82575)
4186 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004187 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004188 itr_val |= 0x8000000;
4189
4190 writel(itr_val, q_vector->itr_register);
4191 q_vector->set_itr = 0;
4192}
4193
4194static irqreturn_t igb_msix_ring(int irq, void *data)
4195{
4196 struct igb_q_vector *q_vector = data;
4197
4198 /* Write the ITR value calculated from the previous interrupt. */
4199 igb_write_itr(q_vector);
4200
4201 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004202
Auke Kok9d5c8242008-01-24 02:22:38 -08004203 return IRQ_HANDLED;
4204}
4205
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004206#ifdef CONFIG_IGB_DCA
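/*
 * Direct Cache Access: tag this vector's descriptor writes (and, for Rx,
 * the header/payload writes) with the DCA tag of the CPU currently
 * running the vector, so the chipset can steer those writes toward that
 * CPU's cache. Re-programmed whenever the vector is seen on a new CPU.
 */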
Alexander Duyck047e0032009-10-27 15:49:27 +00004207static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004208{
Alexander Duyck047e0032009-10-27 15:49:27 +00004209 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004210 struct e1000_hw *hw = &adapter->hw;
4211 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004212
Alexander Duyck047e0032009-10-27 15:49:27 +00004213 if (q_vector->cpu == cpu)
4214 goto out_no_update;
4215
4216 if (q_vector->tx_ring) {
4217 int q = q_vector->tx_ring->reg_idx;
4218 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4219 if (hw->mac.type == e1000_82575) {
4220 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4221 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4222 } else {
4223 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4224 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4225 E1000_DCA_TXCTRL_CPUID_SHIFT;
4226 }
4227 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4228 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4229 }
4230 if (q_vector->rx_ring) {
4231 int q = q_vector->rx_ring->reg_idx;
4232 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4233 if (hw->mac.type == e1000_82575) {
4234 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4235 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4236 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004237 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004238 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004239 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004240 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004241 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4242 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4243 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4244 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004245 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004246 q_vector->cpu = cpu;
4247out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004248 put_cpu();
4249}
4250
4251static void igb_setup_dca(struct igb_adapter *adapter)
4252{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004253 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004254 int i;
4255
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004256 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004257 return;
4258
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004259 /* Always use CB2 mode, difference is masked in the CB driver. */
4260 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4261
Alexander Duyck047e0032009-10-27 15:49:27 +00004262 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004263 adapter->q_vector[i]->cpu = -1;
4264 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004265 }
4266}
4267
4268static int __igb_notify_dca(struct device *dev, void *data)
4269{
4270 struct net_device *netdev = dev_get_drvdata(dev);
4271 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004272 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004273 struct e1000_hw *hw = &adapter->hw;
4274 unsigned long event = *(unsigned long *)data;
4275
4276 switch (event) {
4277 case DCA_PROVIDER_ADD:
4278 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004279 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004280 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004281 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004282 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004283 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004284 igb_setup_dca(adapter);
4285 break;
4286 }
4287 /* Fall Through since DCA is disabled. */
4288 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004289 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004290 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004291 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004292 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004293 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004294 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004295 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004296 }
4297 break;
4298 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004299
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004300 return 0;
4301}
4302
4303static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4304 void *p)
4305{
4306 int ret_val;
4307
4308 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4309 __igb_notify_dca);
4310
4311 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4312}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004313#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004314
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004315static void igb_ping_all_vfs(struct igb_adapter *adapter)
4316{
4317 struct e1000_hw *hw = &adapter->hw;
4318 u32 ping;
4319 int i;
4320
4321 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4322 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004323 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004324 ping |= E1000_VT_MSGTYPE_CTS;
4325 igb_write_mbx(hw, &ping, 1, i);
4326 }
4327}
4328
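/*
 * VMOLR is the per-VF receive filtering/offload control. Roughly: ROPE
 * accepts unicast packets that hit the UTA hash, ROMPE accepts
 * multicast packets that hit the MTA hash, and MPME is full multicast
 * promiscuous mode.
 */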
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004329static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4330{
4331 struct e1000_hw *hw = &adapter->hw;
4332 u32 vmolr = rd32(E1000_VMOLR(vf));
4333 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4334
4335 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
4336 IGB_VF_FLAG_MULTI_PROMISC);
4337 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4338
4339 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4340 vmolr |= E1000_VMOLR_MPME;
4341 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4342 } else {
4343 /*
4344 * if we have hashes and we are clearing a multicast promisc
4345 * flag we need to write the hashes to the MTA as this step
4346 * was previously skipped
4347 */
4348 if (vf_data->num_vf_mc_hashes > 30) {
4349 vmolr |= E1000_VMOLR_MPME;
4350 } else if (vf_data->num_vf_mc_hashes) {
4351 int j;
4352 vmolr |= E1000_VMOLR_ROMPE;
4353 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4354 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4355 }
4356 }
4357
4358 wr32(E1000_VMOLR(vf), vmolr);
4359
4360 /* there are flags left unprocessed, likely not supported */
4361 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4362 return -EINVAL;
4363
4364 return 0;
4365
4366}
4367
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004368static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4369 u32 *msgbuf, u32 vf)
4370{
4371 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4372 u16 *hash_list = (u16 *)&msgbuf[1];
4373 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4374 int i;
4375
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004376 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004377	 * to this VF, to be restored when the PF multicast
4378 * list changes
4379 */
4380 vf_data->num_vf_mc_hashes = n;
4381
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004382 /* only up to 30 hash values supported */
4383 if (n > 30)
4384 n = 30;
4385
4386 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004387 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07004388 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004389
4390 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004391 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004392
4393 return 0;
4394}
4395
4396static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4397{
4398 struct e1000_hw *hw = &adapter->hw;
4399 struct vf_data_storage *vf_data;
4400 int i, j;
4401
4402 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004403 u32 vmolr = rd32(E1000_VMOLR(i));
4404 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4405
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004406 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004407
4408 if ((vf_data->num_vf_mc_hashes > 30) ||
4409 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4410 vmolr |= E1000_VMOLR_MPME;
4411 } else if (vf_data->num_vf_mc_hashes) {
4412 vmolr |= E1000_VMOLR_ROMPE;
4413 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4414 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4415 }
4416 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004417 }
4418}
4419
4420static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4421{
4422 struct e1000_hw *hw = &adapter->hw;
4423 u32 pool_mask, reg, vid;
4424 int i;
4425
4426 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4427
4428 /* Find the vlan filter for this id */
4429 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4430 reg = rd32(E1000_VLVF(i));
4431
4432 /* remove the vf from the pool */
4433 reg &= ~pool_mask;
4434
4435 /* if pool is empty then remove entry from vfta */
4436 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4437 (reg & E1000_VLVF_VLANID_ENABLE)) {
 4438			vid = reg & E1000_VLVF_VLANID_MASK;
 4439			igb_vfta_set(hw, vid, false);
 4440			reg = 0;
4441 }
4442
4443 wr32(E1000_VLVF(i), reg);
4444 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004445
4446 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004447}
4448
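/*
 * Each VLVF entry pairs a VLAN ID with a pool-membership bitmap:
 * roughly, the VID sits in the low bits (E1000_VLVF_VLANID_MASK), one
 * pool-select bit per VF/PF pool sits above it, and the top bit enables
 * the entry. Adding a VF to a VLAN means finding (or allocating) the
 * entry for that VID and setting the VF's pool bit; the VFTA itself is
 * only touched when the first pool joins or the last one leaves.
 */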
4449static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4450{
4451 struct e1000_hw *hw = &adapter->hw;
4452 u32 reg, i;
4453
Alexander Duyck51466232009-10-27 23:47:35 +00004454 /* The vlvf table only exists on 82576 hardware and newer */
4455 if (hw->mac.type < e1000_82576)
4456 return -1;
4457
4458 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004459 if (!adapter->vfs_allocated_count)
4460 return -1;
4461
4462 /* Find the vlan filter for this id */
4463 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4464 reg = rd32(E1000_VLVF(i));
4465 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4466 vid == (reg & E1000_VLVF_VLANID_MASK))
4467 break;
4468 }
4469
4470 if (add) {
4471 if (i == E1000_VLVF_ARRAY_SIZE) {
4472 /* Did not find a matching VLAN ID entry that was
4473 * enabled. Search for a free filter entry, i.e.
4474 * one without the enable bit set
4475 */
4476 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4477 reg = rd32(E1000_VLVF(i));
4478 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4479 break;
4480 }
4481 }
4482 if (i < E1000_VLVF_ARRAY_SIZE) {
4483 /* Found an enabled/available entry */
4484 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4485
4486 /* if !enabled we need to set this up in vfta */
4487 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00004488 /* add VID to filter table */
4489 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004490 reg |= E1000_VLVF_VLANID_ENABLE;
4491 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00004492 reg &= ~E1000_VLVF_VLANID_MASK;
4493 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004494 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004495
4496 /* do not modify RLPML for PF devices */
4497 if (vf >= adapter->vfs_allocated_count)
4498 return 0;
4499
4500 if (!adapter->vf_data[vf].vlans_enabled) {
4501 u32 size;
4502 reg = rd32(E1000_VMOLR(vf));
4503 size = reg & E1000_VMOLR_RLPML_MASK;
4504 size += 4;
4505 reg &= ~E1000_VMOLR_RLPML_MASK;
4506 reg |= size;
4507 wr32(E1000_VMOLR(vf), reg);
4508 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004509
Alexander Duyck51466232009-10-27 23:47:35 +00004510 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004511 return 0;
4512 }
4513 } else {
4514 if (i < E1000_VLVF_ARRAY_SIZE) {
4515 /* remove vf from the pool */
4516 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4517 /* if pool is empty then remove entry from vfta */
4518 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4519 reg = 0;
4520 igb_vfta_set(hw, vid, false);
4521 }
4522 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004523
4524 /* do not modify RLPML for PF devices */
4525 if (vf >= adapter->vfs_allocated_count)
4526 return 0;
4527
4528 adapter->vf_data[vf].vlans_enabled--;
4529 if (!adapter->vf_data[vf].vlans_enabled) {
4530 u32 size;
4531 reg = rd32(E1000_VMOLR(vf));
4532 size = reg & E1000_VMOLR_RLPML_MASK;
4533 size -= 4;
4534 reg &= ~E1000_VMOLR_RLPML_MASK;
4535 reg |= size;
4536 wr32(E1000_VMOLR(vf), reg);
4537 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004538 }
4539 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00004540 return 0;
4541}
4542
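/*
 * VMVIR holds the port VLAN for a VF: with a non-zero VID (plus the
 * "always add default VLAN" flag) the hardware inserts that tag into
 * every frame the VF transmits; writing 0 turns insertion back off.
 */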
4543static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4544{
4545 struct e1000_hw *hw = &adapter->hw;
4546
4547 if (vid)
4548 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
4549 else
4550 wr32(E1000_VMVIR(vf), 0);
4551}
4552
4553static int igb_ndo_set_vf_vlan(struct net_device *netdev,
4554 int vf, u16 vlan, u8 qos)
4555{
4556 int err = 0;
4557 struct igb_adapter *adapter = netdev_priv(netdev);
4558
4559 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
4560 return -EINVAL;
4561 if (vlan || qos) {
4562 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
4563 if (err)
4564 goto out;
4565 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
4566 igb_set_vmolr(adapter, vf, !vlan);
4567 adapter->vf_data[vf].pf_vlan = vlan;
4568 adapter->vf_data[vf].pf_qos = qos;
4569 dev_info(&adapter->pdev->dev,
4570 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
4571 if (test_bit(__IGB_DOWN, &adapter->state)) {
4572 dev_warn(&adapter->pdev->dev,
4573 "The VF VLAN has been set,"
4574 " but the PF device is not up.\n");
4575 dev_warn(&adapter->pdev->dev,
4576 "Bring the PF device up before"
4577 " attempting to use the VF device.\n");
4578 }
4579 } else {
4580 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
4581 false, vf);
4582 igb_set_vmvir(adapter, vlan, vf);
4583 igb_set_vmolr(adapter, vf, true);
4584 adapter->vf_data[vf].pf_vlan = 0;
4585 adapter->vf_data[vf].pf_qos = 0;
4586 }
4587out:
4588 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004589}
4590
4591static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4592{
4593 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4594 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4595
4596 return igb_vlvf_set(adapter, vid, add, vf);
4597}
4598
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004599static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004600{
Williams, Mitch A8151d292010-02-10 01:44:24 +00004601 /* clear flags */
4602 adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004603 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004604
4605 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004606 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004607
4608 /* reset vlans for device */
4609 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00004610 if (adapter->vf_data[vf].pf_vlan)
4611 igb_ndo_set_vf_vlan(adapter->netdev, vf,
4612 adapter->vf_data[vf].pf_vlan,
4613 adapter->vf_data[vf].pf_qos);
4614 else
4615 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004616
4617 /* reset multicast table array for vf */
4618 adapter->vf_data[vf].num_vf_mc_hashes = 0;
4619
4620 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004621 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004622}
4623
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004624static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4625{
4626 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4627
4628 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004629 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
4630 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004631
4632 /* process remaining reset events */
4633 igb_vf_reset(adapter, vf);
4634}
4635
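/*
 * Full reset requested over the mailbox: on top of igb_vf_reset() this
 * re-programs the VF's MAC into its reserved RAR slot, re-enables the
 * VF's queues in VFTE/VFRE, and replies with msgbuf[0] set to
 * E1000_VF_RESET | E1000_VT_MSGTYPE_ACK and the 6-byte MAC address in
 * msgbuf[1..2].
 */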
4636static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004637{
4638 struct e1000_hw *hw = &adapter->hw;
4639 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004640 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004641 u32 reg, msgbuf[3];
4642 u8 *addr = (u8 *)(&msgbuf[1]);
4643
4644 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004645 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004646
4647 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00004648 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004649
4650 /* enable transmit and receive for vf */
4651 reg = rd32(E1000_VFTE);
4652 wr32(E1000_VFTE, reg | (1 << vf));
4653 reg = rd32(E1000_VFRE);
4654 wr32(E1000_VFRE, reg | (1 << vf));
4655
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004656 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004657
4658 /* reply to reset with ack and vf mac address */
4659 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4660 memcpy(addr, vf_mac, 6);
4661 igb_write_mbx(hw, msgbuf, 3, vf);
4662}
4663
4664static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4665{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004666	unsigned char *addr = (unsigned char *)&msg[1];
4667 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004668
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004669 if (is_valid_ether_addr(addr))
4670 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004671
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004672 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004673}
4674
4675static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4676{
4677 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004678 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004679 u32 msg = E1000_VT_MSGTYPE_NACK;
4680
4681 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004682 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4683 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004684 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004685 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004686 }
4687}
4688
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004689static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004690{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004691 struct pci_dev *pdev = adapter->pdev;
4692 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004693 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004694 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004695 s32 retval;
4696
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004697 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004698
Alexander Duyckfef45f42009-12-11 22:57:34 -08004699 if (retval) {
4700 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004701 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08004702 vf_data->flags &= ~IGB_VF_FLAG_CTS;
4703 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4704 return;
4705 goto out;
4706 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004707
4708 /* this is a message we already processed, do nothing */
4709 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004710 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004711
4712 /*
4713 * until the vf completes a reset it should not be
4714 * allowed to start any configuration.
4715 */
4716
4717 if (msgbuf[0] == E1000_VF_RESET) {
4718 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004719 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004720 }
4721
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004722 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08004723 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4724 return;
4725 retval = -1;
4726 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004727 }
4728
4729 switch ((msgbuf[0] & 0xFFFF)) {
4730 case E1000_VF_SET_MAC_ADDR:
4731 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4732 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004733 case E1000_VF_SET_PROMISC:
4734 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4735 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004736 case E1000_VF_SET_MULTICAST:
4737 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4738 break;
4739 case E1000_VF_SET_LPE:
4740 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4741 break;
4742 case E1000_VF_SET_VLAN:
Williams, Mitch A8151d292010-02-10 01:44:24 +00004743 if (adapter->vf_data[vf].pf_vlan)
4744 retval = -1;
4745 else
4746 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004747 break;
4748 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00004749 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004750 retval = -1;
4751 break;
4752 }
4753
Alexander Duyckfef45f42009-12-11 22:57:34 -08004754 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4755out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004756 /* notify the VF of the results of what it sent us */
4757 if (retval)
4758 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4759 else
4760 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4761
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004762 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004763}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004764
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004765static void igb_msg_task(struct igb_adapter *adapter)
4766{
4767 struct e1000_hw *hw = &adapter->hw;
4768 u32 vf;
4769
4770 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4771 /* process any reset requests */
4772 if (!igb_check_for_rst(hw, vf))
4773 igb_vf_reset_event(adapter, vf);
4774
4775 /* process any messages pending */
4776 if (!igb_check_for_msg(hw, vf))
4777 igb_rcv_msg_from_vf(adapter, vf);
4778
4779 /* process any acks */
4780 if (!igb_check_for_ack(hw, vf))
4781 igb_rcv_ack_from_vf(adapter, vf);
4782 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004783}
4784
Auke Kok9d5c8242008-01-24 02:22:38 -08004785/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00004786 * igb_set_uta - Set unicast filter table address
4787 * @adapter: board private structure
4788 *
 4789 * The unicast table address (UTA) is an array of 32-bit registers.
 4790 * The table is meant to be used in a way similar to how the MTA is used,
 4791 * however due to certain limitations in the hardware it is necessary to
 4792 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 4793 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
4794 **/
4795static void igb_set_uta(struct igb_adapter *adapter)
4796{
4797 struct e1000_hw *hw = &adapter->hw;
4798 int i;
4799
4800 /* The UTA table only exists on 82576 hardware and newer */
4801 if (hw->mac.type < e1000_82576)
4802 return;
4803
4804 /* we only need to do this if VMDq is enabled */
4805 if (!adapter->vfs_allocated_count)
4806 return;
4807
4808 for (i = 0; i < hw->mac.uta_reg_count; i++)
4809 array_wr32(E1000_UTA, i, ~0);
4810}
4811
4812/**
Auke Kok9d5c8242008-01-24 02:22:38 -08004813 * igb_intr_msi - Interrupt Handler
4814 * @irq: interrupt number
4815 * @data: pointer to a network interface device structure
4816 **/
4817static irqreturn_t igb_intr_msi(int irq, void *data)
4818{
Alexander Duyck047e0032009-10-27 15:49:27 +00004819 struct igb_adapter *adapter = data;
4820 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004821 struct e1000_hw *hw = &adapter->hw;
4822 /* read ICR disables interrupts using IAM */
4823 u32 icr = rd32(E1000_ICR);
4824
Alexander Duyck047e0032009-10-27 15:49:27 +00004825 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004826
Alexander Duyck7f081d42010-01-07 17:41:00 +00004827 if (icr & E1000_ICR_DRSTA)
4828 schedule_work(&adapter->reset_task);
4829
Alexander Duyck047e0032009-10-27 15:49:27 +00004830 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004831 /* HW is reporting DMA is out of sync */
4832 adapter->stats.doosync++;
4833 }
4834
Auke Kok9d5c8242008-01-24 02:22:38 -08004835 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4836 hw->mac.get_link_status = 1;
4837 if (!test_bit(__IGB_DOWN, &adapter->state))
4838 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4839 }
4840
Alexander Duyck047e0032009-10-27 15:49:27 +00004841 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004842
4843 return IRQ_HANDLED;
4844}
4845
4846/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00004847 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08004848 * @irq: interrupt number
4849 * @data: pointer to a network interface device structure
4850 **/
4851static irqreturn_t igb_intr(int irq, void *data)
4852{
Alexander Duyck047e0032009-10-27 15:49:27 +00004853 struct igb_adapter *adapter = data;
4854 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004855 struct e1000_hw *hw = &adapter->hw;
4856 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4857 * need for the IMC write */
4858 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08004859 if (!icr)
4860 return IRQ_NONE; /* Not our interrupt */
4861
Alexander Duyck047e0032009-10-27 15:49:27 +00004862 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004863
4864 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4865 * not set, then the adapter didn't send an interrupt */
4866 if (!(icr & E1000_ICR_INT_ASSERTED))
4867 return IRQ_NONE;
4868
Alexander Duyck7f081d42010-01-07 17:41:00 +00004869 if (icr & E1000_ICR_DRSTA)
4870 schedule_work(&adapter->reset_task);
4871
Alexander Duyck047e0032009-10-27 15:49:27 +00004872 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004873 /* HW is reporting DMA is out of sync */
4874 adapter->stats.doosync++;
4875 }
4876
Auke Kok9d5c8242008-01-24 02:22:38 -08004877 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4878 hw->mac.get_link_status = 1;
4879 /* guard against interrupt when we're going down */
4880 if (!test_bit(__IGB_DOWN, &adapter->state))
4881 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4882 }
4883
Alexander Duyck047e0032009-10-27 15:49:27 +00004884 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004885
4886 return IRQ_HANDLED;
4887}
4888
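/*
 * Called from igb_poll() once a vector has finished its budgeted work:
 * re-tune the ITR if adaptive moderation is enabled, then re-arm just
 * this vector's interrupt (its EIMS bit under MSI-X, the global enable
 * otherwise).
 */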
Alexander Duyck047e0032009-10-27 15:49:27 +00004889static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08004890{
Alexander Duyck047e0032009-10-27 15:49:27 +00004891 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08004892 struct e1000_hw *hw = &adapter->hw;
4893
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00004894 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4895 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
Alexander Duyck047e0032009-10-27 15:49:27 +00004896 if (!adapter->msix_entries)
Alexander Duyck46544252009-02-19 20:39:04 -08004897 igb_set_itr(adapter);
4898 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004899 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004900 }
4901
4902 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4903 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00004904 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08004905 else
4906 igb_irq_enable(adapter);
4907 }
4908}
4909
Auke Kok9d5c8242008-01-24 02:22:38 -08004910/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004911 * igb_poll - NAPI Rx polling callback
4912 * @napi: napi polling structure
4913 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08004914 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004915static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08004916{
Alexander Duyck047e0032009-10-27 15:49:27 +00004917 struct igb_q_vector *q_vector = container_of(napi,
4918 struct igb_q_vector,
4919 napi);
4920 int tx_clean_complete = 1, work_done = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004921
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004922#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004923 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4924 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004925#endif
Alexander Duyck047e0032009-10-27 15:49:27 +00004926 if (q_vector->tx_ring)
4927 tx_clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004928
Alexander Duyck047e0032009-10-27 15:49:27 +00004929 if (q_vector->rx_ring)
4930 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4931
4932 if (!tx_clean_complete)
4933 work_done = budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08004934
Alexander Duyck46544252009-02-19 20:39:04 -08004935 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck5e6d5b12009-03-13 20:40:38 +00004936 if (work_done < budget) {
Alexander Duyck46544252009-02-19 20:39:04 -08004937 napi_complete(napi);
Alexander Duyck047e0032009-10-27 15:49:27 +00004938 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004939 }
4940
4941 return work_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08004942}
Al Viro6d8126f2008-03-16 22:23:24 +00004943
Auke Kok9d5c8242008-01-24 02:22:38 -08004944/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004945 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004946 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004947 * @shhwtstamps: timestamp structure to update
4948 * @regval: unsigned 64bit system time value.
4949 *
4950 * We need to convert the system time value stored in the RX/TXSTMP registers
4951 * into a hwtstamp which can be used by the upper level timestamping functions
4952 */
4953static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4954 struct skb_shared_hwtstamps *shhwtstamps,
4955 u64 regval)
4956{
4957 u64 ns;
4958
Alexander Duyck55cac242009-11-19 12:42:21 +00004959 /*
4960 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
4961 * 24 to match clock shift we setup earlier.
4962 */
4963 if (adapter->hw.mac.type == e1000_82580)
4964 regval <<= IGB_82580_TSYNC_SHIFT;
4965
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004966 ns = timecounter_cyc2time(&adapter->clock, regval);
4967 timecompare_update(&adapter->compare, ns);
4968 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4969 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4970 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4971}
4972
4973/**
4974 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4975 * @q_vector: pointer to q_vector containing needed info
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004976 * @skb: packet that was just sent
4977 *
4978 * If we were asked to do hardware stamping and such a time stamp is
 4979 * available, then it must have been for this skb here because we
 4980 * allow only one such packet into the queue.
4981 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004982static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004983{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004984 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004985 union skb_shared_tx *shtx = skb_tx(skb);
4986 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004987 struct skb_shared_hwtstamps shhwtstamps;
4988 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004989
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004990 /* if skb does not support hw timestamp or TX stamp not valid exit */
4991 if (likely(!shtx->hardware) ||
4992 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4993 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004994
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004995 regval = rd32(E1000_TXSTMPL);
4996 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4997
4998 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4999 skb_tstamp_tx(skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005000}
5001
5002/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005003 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005004 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005005 * returns true if ring is completely cleaned
5006 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005007static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005008{
Alexander Duyck047e0032009-10-27 15:49:27 +00005009 struct igb_adapter *adapter = q_vector->adapter;
5010 struct igb_ring *tx_ring = q_vector->tx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00005011 struct net_device *netdev = tx_ring->netdev;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005012 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08005013 struct igb_buffer *buffer_info;
5014 struct sk_buff *skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005015 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005016 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005017 unsigned int i, eop, count = 0;
5018 bool cleaned = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08005019
Auke Kok9d5c8242008-01-24 02:22:38 -08005020 i = tx_ring->next_to_clean;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005021 eop = tx_ring->buffer_info[i].next_to_watch;
5022 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5023
5024 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
5025 (count < tx_ring->count)) {
5026 for (cleaned = false; !cleaned; count++) {
5027 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005028 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005029 cleaned = (i == eop);
Auke Kok9d5c8242008-01-24 02:22:38 -08005030 skb = buffer_info->skb;
5031
5032 if (skb) {
5033 unsigned int segs, bytecount;
5034 /* gso_segs is currently only valid for tcp */
5035 segs = skb_shinfo(skb)->gso_segs ?: 1;
5036 /* multiply data chunks by size of headers */
5037 bytecount = ((segs - 1) * skb_headlen(skb)) +
5038 skb->len;
5039 total_packets += segs;
5040 total_bytes += bytecount;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005041
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005042 igb_tx_hwtstamp(q_vector, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005043 }
5044
Alexander Duyck80785292009-10-27 15:51:47 +00005045 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005046 tx_desc->wb.status = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005047
5048 i++;
5049 if (i == tx_ring->count)
5050 i = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005051 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005052 eop = tx_ring->buffer_info[i].next_to_watch;
5053 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5054 }
5055
Auke Kok9d5c8242008-01-24 02:22:38 -08005056 tx_ring->next_to_clean = i;
5057
Alexander Duyckfc7d3452008-08-26 04:25:08 -07005058 if (unlikely(count &&
Auke Kok9d5c8242008-01-24 02:22:38 -08005059 netif_carrier_ok(netdev) &&
Alexander Duyckc493ea42009-03-20 00:16:50 +00005060 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005061 /* Make sure that anybody stopping the queue after this
5062 * sees the new next_to_clean.
5063 */
5064 smp_mb();
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005065 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5066 !(test_bit(__IGB_DOWN, &adapter->state))) {
5067 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005068 tx_ring->tx_stats.restart_queue++;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005069 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005070 }
5071
5072 if (tx_ring->detect_tx_hung) {
5073 /* Detect a transmit hang in hardware, this serializes the
5074 * check with the clearing of time_stamp and movement of i */
5075 tx_ring->detect_tx_hung = false;
5076 if (tx_ring->buffer_info[i].time_stamp &&
5077 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005078 (adapter->tx_timeout_factor * HZ)) &&
5079 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005080
Auke Kok9d5c8242008-01-24 02:22:38 -08005081 /* detected Tx unit hang */
Alexander Duyck80785292009-10-27 15:51:47 +00005082 dev_err(&tx_ring->pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005083 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005084 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005085 " TDH <%x>\n"
5086 " TDT <%x>\n"
5087 " next_to_use <%x>\n"
5088 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005089 "buffer_info[next_to_clean]\n"
5090 " time_stamp <%lx>\n"
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005091 " next_to_watch <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005092 " jiffies <%lx>\n"
5093 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005094 tx_ring->queue_index,
Alexander Duyckfce99e32009-10-27 15:51:27 +00005095 readl(tx_ring->head),
5096 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005097 tx_ring->next_to_use,
5098 tx_ring->next_to_clean,
Alexander Duyckf7ba2052009-10-27 23:48:51 +00005099 tx_ring->buffer_info[eop].time_stamp,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005100 eop,
Auke Kok9d5c8242008-01-24 02:22:38 -08005101 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005102 eop_desc->wb.status);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005103 netif_stop_subqueue(netdev, tx_ring->queue_index);
Auke Kok9d5c8242008-01-24 02:22:38 -08005104 }
5105 }
5106 tx_ring->total_bytes += total_bytes;
5107 tx_ring->total_packets += total_packets;
Alexander Duycke21ed352008-07-08 15:07:24 -07005108 tx_ring->tx_stats.bytes += total_bytes;
5109 tx_ring->tx_stats.packets += total_packets;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005110 return (count < tx_ring->count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005111}
5112
Auke Kok9d5c8242008-01-24 02:22:38 -08005113/**
5114 * igb_receive_skb - helper function to handle rx indications
Alexander Duyck047e0032009-10-27 15:49:27 +00005115 * @q_vector: structure containing interrupt and ring information
5116 * @skb: packet to send up
5117 * @vlan_tag: vlan tag for packet
Auke Kok9d5c8242008-01-24 02:22:38 -08005118 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005119static void igb_receive_skb(struct igb_q_vector *q_vector,
5120 struct sk_buff *skb,
5121 u16 vlan_tag)
Auke Kok9d5c8242008-01-24 02:22:38 -08005122{
Alexander Duyck047e0032009-10-27 15:49:27 +00005123 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyckd3352522008-07-08 15:12:13 -07005124
Alexander Duyck047e0032009-10-27 15:49:27 +00005125 if (vlan_tag)
5126 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5127 vlan_tag, skb);
Alexander Duyck182ff8d2009-04-27 22:35:33 +00005128 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005129 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005130}
5131
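/*
 * Map the descriptor status bits onto skb->ip_summed: default to
 * CHECKSUM_NONE, mark hardware-verified TCP/UDP as
 * CHECKSUM_UNNECESSARY, and quietly count hits of the 82576 SCTP
 * short-packet errata so the stack re-verifies those checksums itself.
 */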
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005132static inline void igb_rx_checksum_adv(struct igb_ring *ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08005133 u32 status_err, struct sk_buff *skb)
5134{
5135 skb->ip_summed = CHECKSUM_NONE;
5136
5137 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005138 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5139 (status_err & E1000_RXD_STAT_IXSM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005140 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005141
Auke Kok9d5c8242008-01-24 02:22:38 -08005142 /* TCP/UDP checksum error bit is set */
5143 if (status_err &
5144 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005145 /*
5146 * work around errata with sctp packets where the TCPE aka
5147 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5148 * packets, (aka let the stack check the crc32c)
5149 */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005150 if ((skb->len == 60) &&
5151 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005152 ring->rx_stats.csum_err++;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005153
Auke Kok9d5c8242008-01-24 02:22:38 -08005154 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005155 return;
5156 }
5157 /* It must be a TCP or UDP packet with a valid checksum */
5158 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5159 skb->ip_summed = CHECKSUM_UNNECESSARY;
5160
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005161 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005162}
5163
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005164static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5165 struct sk_buff *skb)
5166{
5167 struct igb_adapter *adapter = q_vector->adapter;
5168 struct e1000_hw *hw = &adapter->hw;
5169 u64 regval;
5170
5171 /*
5172 * If this bit is set, then the RX registers contain the time stamp. No
5173 * other packet will be time stamped until we read these registers, so
5174 * read the registers to make them available again. Because only one
5175 * packet can be time stamped at a time, we know that the register
5176 * values must belong to this one here and therefore we don't need to
5177 * compare any of the additional attributes stored for it.
5178 *
5179 * If nothing went wrong, then it should have a skb_shared_tx that we
5180 * can turn into a skb_shared_hwtstamps.
5181 */
5182 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
5183 return;
5184 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5185 return;
5186
5187 regval = rd32(E1000_RXSTMPL);
5188 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5189
5190 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5191}
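/*
 * The split-header length reported by the hardware lives in the
 * hdr_info word of the advanced Rx descriptor writeback; in effect:
 *   hlen = (le16_to_cpu(hdr_info) & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT
 * clamped to rx_buffer_len, since the hardware never DMAs more than
 * the header buffer can hold even when it parses a longer header.
 */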
Alexander Duyck4c844852009-10-27 15:52:07 +00005192static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005193 union e1000_adv_rx_desc *rx_desc)
5194{
5195 /* HW will not DMA in data larger than the given buffer, even if it
5196 * parses the (NFS, of course) header to be larger. In that case, it
5197 * fills the header buffer and spills the rest into the page.
5198 */
5199 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5200 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck4c844852009-10-27 15:52:07 +00005201 if (hlen > rx_ring->rx_buffer_len)
5202 hlen = rx_ring->rx_buffer_len;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005203 return hlen;
5204}
5205
Alexander Duyck047e0032009-10-27 15:49:27 +00005206static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5207 int *work_done, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005208{
Alexander Duyck047e0032009-10-27 15:49:27 +00005209 struct igb_ring *rx_ring = q_vector->rx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00005210 struct net_device *netdev = rx_ring->netdev;
Alexander Duyck80785292009-10-27 15:51:47 +00005211 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08005212 union e1000_adv_rx_desc *rx_desc, *next_rxd;
5213 struct igb_buffer *buffer_info, *next_buffer;
5214 struct sk_buff *skb;
Auke Kok9d5c8242008-01-24 02:22:38 -08005215 bool cleaned = false;
5216 int cleaned_count = 0;
Alexander Duyckd1eff352009-11-12 18:38:35 +00005217 int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005218 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00005219 unsigned int i;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005220 u32 staterr;
5221 u16 length;
Alexander Duyck047e0032009-10-27 15:49:27 +00005222 u16 vlan_tag;
Auke Kok9d5c8242008-01-24 02:22:38 -08005223
5224 i = rx_ring->next_to_clean;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005225 buffer_info = &rx_ring->buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08005226 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5227 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5228
5229 while (staterr & E1000_RXD_STAT_DD) {
5230 if (*work_done >= budget)
5231 break;
5232 (*work_done)++;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005233
5234 skb = buffer_info->skb;
5235 prefetch(skb->data - NET_IP_ALIGN);
5236 buffer_info->skb = NULL;
5237
5238 i++;
5239 if (i == rx_ring->count)
5240 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005241
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005242 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
5243 prefetch(next_rxd);
5244 next_buffer = &rx_ring->buffer_info[i];
5245
5246 length = le16_to_cpu(rx_desc->wb.upper.length);
5247 cleaned = true;
5248 cleaned_count++;
5249
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005250 if (buffer_info->dma) {
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005251 pci_unmap_single(pdev, buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00005252 rx_ring->rx_buffer_len,
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005253 PCI_DMA_FROMDEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00005254 buffer_info->dma = 0;
Alexander Duyck4c844852009-10-27 15:52:07 +00005255 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005256 skb_put(skb, length);
5257 goto send_up;
5258 }
Alexander Duyck4c844852009-10-27 15:52:07 +00005259 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005260 }
5261
5262 if (length) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005263 pci_unmap_page(pdev, buffer_info->page_dma,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005264 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08005265 buffer_info->page_dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005266
5267 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
5268 buffer_info->page,
5269 buffer_info->page_offset,
5270 length);
5271
Alexander Duyckd1eff352009-11-12 18:38:35 +00005272 if ((page_count(buffer_info->page) != 1) ||
5273 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005274 buffer_info->page = NULL;
5275 else
5276 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08005277
5278 skb->len += length;
5279 skb->data_len += length;
5280 skb->truesize += length;
Auke Kok9d5c8242008-01-24 02:22:38 -08005281 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005282
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005283 if (!(staterr & E1000_RXD_STAT_EOP)) {
Alexander Duyckb2d56532008-11-20 00:47:34 -08005284 buffer_info->skb = next_buffer->skb;
5285 buffer_info->dma = next_buffer->dma;
5286 next_buffer->skb = skb;
5287 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005288 goto next_desc;
5289 }
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005290send_up:
Auke Kok9d5c8242008-01-24 02:22:38 -08005291 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
5292 dev_kfree_skb_irq(skb);
5293 goto next_desc;
5294 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005295
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005296 igb_rx_hwtstamp(q_vector, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005297 total_bytes += skb->len;
5298 total_packets++;
5299
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005300 igb_rx_checksum_adv(rx_ring, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005301
5302 skb->protocol = eth_type_trans(skb, netdev);
Alexander Duyck047e0032009-10-27 15:49:27 +00005303 skb_record_rx_queue(skb, rx_ring->queue_index);
Auke Kok9d5c8242008-01-24 02:22:38 -08005304
Alexander Duyck047e0032009-10-27 15:49:27 +00005305 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5306 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5307
5308 igb_receive_skb(q_vector, skb, vlan_tag);
Auke Kok9d5c8242008-01-24 02:22:38 -08005309
Auke Kok9d5c8242008-01-24 02:22:38 -08005310next_desc:
5311 rx_desc->wb.upper.status_error = 0;
5312
5313 /* return some buffers to hardware, one at a time is too slow */
5314 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Mitch Williams3b644cf2008-06-27 10:59:48 -07005315 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005316 cleaned_count = 0;
5317 }
5318
5319 /* use prefetched values */
5320 rx_desc = next_rxd;
5321 buffer_info = next_buffer;
Auke Kok9d5c8242008-01-24 02:22:38 -08005322 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5323 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005324
Auke Kok9d5c8242008-01-24 02:22:38 -08005325 rx_ring->next_to_clean = i;
Alexander Duyckc493ea42009-03-20 00:16:50 +00005326 cleaned_count = igb_desc_unused(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08005327
5328 if (cleaned_count)
Mitch Williams3b644cf2008-06-27 10:59:48 -07005329 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005330
5331 rx_ring->total_packets += total_packets;
5332 rx_ring->total_bytes += total_bytes;
5333 rx_ring->rx_stats.packets += total_packets;
5334 rx_ring->rx_stats.bytes += total_bytes;
Auke Kok9d5c8242008-01-24 02:22:38 -08005335 return cleaned;
5336}
5337
Auke Kok9d5c8242008-01-24 02:22:38 -08005338/**
5339 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
5340 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
5341 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00005342void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08005343{
Alexander Duycke694e962009-10-27 15:53:06 +00005344 struct net_device *netdev = rx_ring->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08005345 union e1000_adv_rx_desc *rx_desc;
5346 struct igb_buffer *buffer_info;
5347 struct sk_buff *skb;
5348 unsigned int i;
Alexander Duyckdb761762009-02-06 23:15:25 +00005349 int bufsz;
Auke Kok9d5c8242008-01-24 02:22:38 -08005350
5351 i = rx_ring->next_to_use;
5352 buffer_info = &rx_ring->buffer_info[i];
5353
Alexander Duyck4c844852009-10-27 15:52:07 +00005354 bufsz = rx_ring->rx_buffer_len;
Alexander Duyckdb761762009-02-06 23:15:25 +00005355
Auke Kok9d5c8242008-01-24 02:22:38 -08005356 while (cleaned_count--) {
5357 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5358
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005359 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005360 if (!buffer_info->page) {
Alexander Duyck42d07812009-10-27 23:51:16 +00005361 buffer_info->page = netdev_alloc_page(netdev);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005362 if (!buffer_info->page) {
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005363 rx_ring->rx_stats.alloc_failed++;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005364 goto no_buffers;
5365 }
5366 buffer_info->page_offset = 0;
5367 } else {
5368 buffer_info->page_offset ^= PAGE_SIZE / 2;
Auke Kok9d5c8242008-01-24 02:22:38 -08005369 }
5370 buffer_info->page_dma =
Alexander Duyck80785292009-10-27 15:51:47 +00005371 pci_map_page(rx_ring->pdev, buffer_info->page,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005372 buffer_info->page_offset,
5373 PAGE_SIZE / 2,
Auke Kok9d5c8242008-01-24 02:22:38 -08005374 PCI_DMA_FROMDEVICE);
Alexander Duyck42d07812009-10-27 23:51:16 +00005375 if (pci_dma_mapping_error(rx_ring->pdev,
5376 buffer_info->page_dma)) {
5377 buffer_info->page_dma = 0;
5378 rx_ring->rx_stats.alloc_failed++;
5379 goto no_buffers;
5380 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005381 }
5382
Alexander Duyck42d07812009-10-27 23:51:16 +00005383 skb = buffer_info->skb;
5384 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00005385 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Auke Kok9d5c8242008-01-24 02:22:38 -08005386 if (!skb) {
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005387 rx_ring->rx_stats.alloc_failed++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005388 goto no_buffers;
5389 }
5390
Auke Kok9d5c8242008-01-24 02:22:38 -08005391 buffer_info->skb = skb;
Alexander Duyck42d07812009-10-27 23:51:16 +00005392 }
5393 if (!buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00005394 buffer_info->dma = pci_map_single(rx_ring->pdev,
5395 skb->data,
Auke Kok9d5c8242008-01-24 02:22:38 -08005396 bufsz,
5397 PCI_DMA_FROMDEVICE);
Alexander Duyck42d07812009-10-27 23:51:16 +00005398 if (pci_dma_mapping_error(rx_ring->pdev,
5399 buffer_info->dma)) {
5400 buffer_info->dma = 0;
5401 rx_ring->rx_stats.alloc_failed++;
5402 goto no_buffers;
5403 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005404 }
5405 /* Refresh the desc even if buffer_addrs didn't change because
5406 * each write-back erases this info. */
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005407 if (bufsz < IGB_RXBUFFER_1024) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005408 rx_desc->read.pkt_addr =
5409 cpu_to_le64(buffer_info->page_dma);
5410 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
5411 } else {
Alexander Duyck42d07812009-10-27 23:51:16 +00005412 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08005413 rx_desc->read.hdr_addr = 0;
5414 }
5415
5416 i++;
5417 if (i == rx_ring->count)
5418 i = 0;
5419 buffer_info = &rx_ring->buffer_info[i];
5420 }
5421
5422no_buffers:
5423 if (rx_ring->next_to_use != i) {
5424 rx_ring->next_to_use = i;
5425 if (i == 0)
5426 i = (rx_ring->count - 1);
5427 else
5428 i--;
5429
5430 /* Force memory writes to complete before letting h/w
5431 * know there are new descriptors to fetch. (Only
5432 * applicable for weak-ordered memory model archs,
5433 * such as IA-64). */
5434 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00005435 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08005436 }
5437}
5438
5439/**
5440 * igb_mii_ioctl - handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG)
5441 * @netdev: network interface device structure
5442 * @ifr: ifreq structure carrying the MII register request
5443 * @cmd: ioctl command
5444 **/
5445static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5446{
5447 struct igb_adapter *adapter = netdev_priv(netdev);
5448 struct mii_ioctl_data *data = if_mii(ifr);
5449
5450 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5451 return -EOPNOTSUPP;
5452
5453 switch (cmd) {
5454 case SIOCGMIIPHY:
5455 data->phy_id = adapter->hw.phy.addr;
5456 break;
5457 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08005458 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
5459 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08005460 return -EIO;
5461 break;
5462 case SIOCSMIIREG:
5463 default:
5464 return -EOPNOTSUPP;
5465 }
5466 return 0;
5467}
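
/*
 * Usage sketch (illustrative only, not part of the driver): userspace
 * reaches the handler above through the standard MII ioctls. Assuming
 * <sys/ioctl.h>, <net/if.h>, <linux/mii.h> and <linux/sockios.h>, and
 * with "eth0" and MII_BMSR as example choices, reading a PHY register
 * looks roughly like:
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);         (fills in mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);         (result lands in mii->val_out)
 */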
5468
5469/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00005470 * igb_hwtstamp_ioctl - control hardware time stamping
5471 * @netdev: network interface device structure
5472 * @ifr: ifreq structure carrying the hwtstamp_config payload
5473 * @cmd: ioctl command
5474 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005475 * Outgoing time stamping can be enabled and disabled. Play nice and
5476 * disable it when requested, although it shouldn't cause any overhead
5477 * when no packet needs it. At most one packet in the queue may be
5478 * marked for time stamping, otherwise it would be impossible to tell
5479 * for sure to which packet the hardware time stamp belongs.
5480 *
5481 * Incoming time stamping has to be configured via the hardware
5482 * filters. Not all combinations are supported, in particular event
5483 * type has to be specified. Matching the kind of event packet is
5484 * not supported, with the exception of "all V2 events regardless of
5485 * layer 2 or 4".
5486 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00005487 **/
5488static int igb_hwtstamp_ioctl(struct net_device *netdev,
5489 struct ifreq *ifr, int cmd)
5490{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005491 struct igb_adapter *adapter = netdev_priv(netdev);
5492 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00005493 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005494 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
5495 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005496 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005497 bool is_l4 = false;
5498 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005499 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00005500
5501 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5502 return -EFAULT;
5503
5504 /* reserved for future extensions */
5505 if (config.flags)
5506 return -EINVAL;
5507
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005508 switch (config.tx_type) {
5509 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005510 tsync_tx_ctl = 0;
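 /* fall through */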
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005511 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005512 break;
5513 default:
5514 return -ERANGE;
5515 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00005516
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005517 switch (config.rx_filter) {
5518 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005519 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005520 break;
5521 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
5522 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
5523 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
5524 case HWTSTAMP_FILTER_ALL:
5525 /*
5526 * register TSYNCRXCFG must be set, therefore it is not
5527 * possible to time stamp both Sync and Delay_Req messages
5528 * => fall back to time stamping all packets
5529 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005530 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005531 config.rx_filter = HWTSTAMP_FILTER_ALL;
5532 break;
5533 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005534 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005535 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005536 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005537 break;
5538 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005539 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005540 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005541 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005542 break;
5543 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5544 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005545 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005546 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005547 is_l2 = true;
5548 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005549 config.rx_filter = HWTSTAMP_FILTER_SOME;
5550 break;
5551 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5552 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005553 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005554 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005555 is_l2 = true;
5556 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005557 config.rx_filter = HWTSTAMP_FILTER_SOME;
5558 break;
5559 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5560 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5561 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005562 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005563 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005564 is_l2 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005565 break;
5566 default:
5567 return -ERANGE;
5568 }
5569
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005570 if (hw->mac.type == e1000_82575) {
5571 if (tsync_rx_ctl || tsync_tx_ctl)
5572 return -EINVAL;
5573 return 0;
5574 }
5575
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005576 /* enable/disable TX */
5577 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005578 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5579 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005580 wr32(E1000_TSYNCTXCTL, regval);
5581
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005582 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005583 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005584 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5585 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005586 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005587
5588 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005589 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5590
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005591 /* define ethertype filter for timestamped packets */
5592 if (is_l2)
5593 wr32(E1000_ETQF(3),
5594 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5595 E1000_ETQF_1588 | /* enable timestamping */
5596 ETH_P_1588)); /* 1588 eth protocol type */
5597 else
5598 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005599
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005600#define PTP_PORT 319
5601 /* L4 Queue Filter[3]: filter by destination port and protocol */
5602 if (is_l4) {
5603 u32 ftqf = (IPPROTO_UDP /* UDP */
5604 | E1000_FTQF_VF_BP /* VF not compared */
5605 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5606 | E1000_FTQF_MASK); /* mask all inputs */
5607 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005608
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005609 wr32(E1000_IMIR(3), htons(PTP_PORT));
5610 wr32(E1000_IMIREXT(3),
5611 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5612 if (hw->mac.type == e1000_82576) {
5613 /* enable source port check */
5614 wr32(E1000_SPQF(3), htons(PTP_PORT));
5615 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5616 }
5617 wr32(E1000_FTQF(3), ftqf);
5618 } else {
5619 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5620 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005621 wrfl();
5622
5623 adapter->hwtstamp_config = config;
5624
5625 /* clear TX/RX time stamp registers, just to be sure */
5626 regval = rd32(E1000_TXSTMPH);
5627 regval = rd32(E1000_RXSTMPH);
5628
5629 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
5630 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00005631}
5632
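/*
 * Usage sketch (illustrative only): a userspace PTP daemon enables
 * hardware time stamping through SIOCSHWTSTAMP roughly as follows,
 * assuming <linux/net_tstamp.h>, <linux/sockios.h>, <net/if.h> and
 * <sys/ioctl.h>; "eth0" and the V2 event filter are example choices.
 * Because the handler above may widen rx_filter (e.g. to
 * HWTSTAMP_FILTER_ALL) and copies the config back, callers should
 * re-read cfg after the ioctl.
 *
 *	struct hwtstamp_config cfg;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */
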
5633/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005634 * igb_ioctl - dispatch device-specific ioctls
5635 * @netdev: network interface device structure
5636 * @ifr: ifreq structure for the request
5637 * @cmd: ioctl command
5638 **/
5639static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5640{
5641 switch (cmd) {
5642 case SIOCGMIIPHY:
5643 case SIOCGMIIREG:
5644 case SIOCSMIIREG:
5645 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00005646 case SIOCSHWTSTAMP:
5647 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08005648 default:
5649 return -EOPNOTSUPP;
5650 }
5651}
5652
Alexander Duyck009bc062009-07-23 18:08:35 +00005653s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5654{
5655 struct igb_adapter *adapter = hw->back;
5656 u16 cap_offset;
5657
5658 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5659 if (!cap_offset)
5660 return -E1000_ERR_CONFIG;
5661
5662 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
5663
5664 return 0;
5665}
5666
5667s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5668{
5669 struct igb_adapter *adapter = hw->back;
5670 u16 cap_offset;
5671
5672 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5673 if (!cap_offset)
5674 return -E1000_ERR_CONFIG;
5675
5676 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
5677
5678 return 0;
5679}
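
/*
 * These wrappers let the shared MAC code access PCIe capability registers
 * through the e1000_hw handle without a struct pci_dev. A hypothetical
 * caller reading the standard Link Status register (offset PCI_EXP_LNKSTA
 * within the PCI Express capability) would do:
 *
 *	u16 link_status;
 *
 *	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_LNKSTA, &link_status))
 *		width = (link_status & PCI_EXP_LNKSTA_NLW) >> 4;
 */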
5680
Auke Kok9d5c8242008-01-24 02:22:38 -08005681static void igb_vlan_rx_register(struct net_device *netdev,
5682 struct vlan_group *grp)
5683{
5684 struct igb_adapter *adapter = netdev_priv(netdev);
5685 struct e1000_hw *hw = &adapter->hw;
5686 u32 ctrl, rctl;
5687
5688 igb_irq_disable(adapter);
5689 adapter->vlgrp = grp;
5690
5691 if (grp) {
5692 /* enable VLAN tag insert/strip */
5693 ctrl = rd32(E1000_CTRL);
5694 ctrl |= E1000_CTRL_VME;
5695 wr32(E1000_CTRL, ctrl);
5696
Alexander Duyck51466232009-10-27 23:47:35 +00005697 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08005698 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08005699 rctl &= ~E1000_RCTL_CFIEN;
5700 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08005701 } else {
5702 /* disable VLAN tag insert/strip */
5703 ctrl = rd32(E1000_CTRL);
5704 ctrl &= ~E1000_CTRL_VME;
5705 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08005706 }
5707
Alexander Duycke1739522009-02-19 20:39:44 -08005708 igb_rlpml_set(adapter);
5709
Auke Kok9d5c8242008-01-24 02:22:38 -08005710 if (!test_bit(__IGB_DOWN, &adapter->state))
5711 igb_irq_enable(adapter);
5712}
5713
5714static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5715{
5716 struct igb_adapter *adapter = netdev_priv(netdev);
5717 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005718 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005719
Alexander Duyck51466232009-10-27 23:47:35 +00005720 /* attempt to add filter to vlvf array */
5721 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005722
Alexander Duyck51466232009-10-27 23:47:35 +00005723 /* add the filter since PF can receive vlans w/o entry in vlvf */
5724 igb_vfta_set(hw, vid, true);
Auke Kok9d5c8242008-01-24 02:22:38 -08005725}
5726
5727static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5728{
5729 struct igb_adapter *adapter = netdev_priv(netdev);
5730 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005731 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00005732 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08005733
5734 igb_irq_disable(adapter);
5735 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5736
5737 if (!test_bit(__IGB_DOWN, &adapter->state))
5738 igb_irq_enable(adapter);
5739
Alexander Duyck51466232009-10-27 23:47:35 +00005740 /* remove vlan from VLVF table array */
5741 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08005742
Alexander Duyck51466232009-10-27 23:47:35 +00005743 /* if vid was not present in VLVF just remove it from table */
5744 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005745 igb_vfta_set(hw, vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08005746}
5747
5748static void igb_restore_vlan(struct igb_adapter *adapter)
5749{
5750 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5751
5752 if (adapter->vlgrp) {
5753 u16 vid;
5754 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5755 if (!vlan_group_get_device(adapter->vlgrp, vid))
5756 continue;
5757 igb_vlan_rx_add_vid(adapter->netdev, vid);
5758 }
5759 }
5760}
5761
5762int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5763{
Alexander Duyck090b1792009-10-27 23:51:55 +00005764 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08005765 struct e1000_mac_info *mac = &adapter->hw.mac;
5766
5767 mac->autoneg = 0;
5768
Auke Kok9d5c8242008-01-24 02:22:38 -08005769 switch (spddplx) {
5770 case SPEED_10 + DUPLEX_HALF:
5771 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5772 break;
5773 case SPEED_10 + DUPLEX_FULL:
5774 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5775 break;
5776 case SPEED_100 + DUPLEX_HALF:
5777 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5778 break;
5779 case SPEED_100 + DUPLEX_FULL:
5780 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5781 break;
5782 case SPEED_1000 + DUPLEX_FULL:
5783 mac->autoneg = 1;
5784 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5785 break;
5786 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5787 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005788 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08005789 return -EINVAL;
5790 }
5791 return 0;
5792}
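
/*
 * Illustrative call (as the ethtool set_settings path would issue it):
 * the requested speed and duplex are combined by simple addition, e.g.
 *
 *	err = igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);
 *
 * which selects ADVERTISE_100_FULL above.
 */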
5793
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00005794static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
Auke Kok9d5c8242008-01-24 02:22:38 -08005795{
5796 struct net_device *netdev = pci_get_drvdata(pdev);
5797 struct igb_adapter *adapter = netdev_priv(netdev);
5798 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07005799 u32 ctrl, rctl, status;
Auke Kok9d5c8242008-01-24 02:22:38 -08005800 u32 wufc = adapter->wol;
5801#ifdef CONFIG_PM
5802 int retval = 0;
5803#endif
5804
5805 netif_device_detach(netdev);
5806
Alexander Duycka88f10e2008-07-08 15:13:38 -07005807 if (netif_running(netdev))
5808 igb_close(netdev);
5809
Alexander Duyck047e0032009-10-27 15:49:27 +00005810 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08005811
5812#ifdef CONFIG_PM
5813 retval = pci_save_state(pdev);
5814 if (retval)
5815 return retval;
5816#endif
5817
5818 status = rd32(E1000_STATUS);
5819 if (status & E1000_STATUS_LU)
5820 wufc &= ~E1000_WUFC_LNKC;
5821
5822 if (wufc) {
5823 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005824 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08005825
5826 /* turn on all-multi mode if wake on multicast is enabled */
5827 if (wufc & E1000_WUFC_MC) {
5828 rctl = rd32(E1000_RCTL);
5829 rctl |= E1000_RCTL_MPE;
5830 wr32(E1000_RCTL, rctl);
5831 }
5832
5833 ctrl = rd32(E1000_CTRL);
5834 /* advertise wake from D3Cold */
5835 #define E1000_CTRL_ADVD3WUC 0x00100000
5836 /* phy power management enable */
5837 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5838 ctrl |= E1000_CTRL_ADVD3WUC;
5839 wr32(E1000_CTRL, ctrl);
5840
Auke Kok9d5c8242008-01-24 02:22:38 -08005841 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00005842 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08005843
5844 wr32(E1000_WUC, E1000_WUC_PME_EN);
5845 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08005846 } else {
5847 wr32(E1000_WUC, 0);
5848 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08005849 }
5850
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00005851 *enable_wake = wufc || adapter->en_mng_pt;
5852 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00005853 igb_power_down_link(adapter);
5854 else
5855 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08005856
5857 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5858 * would have already happened in close and is redundant. */
5859 igb_release_hw_control(adapter);
5860
5861 pci_disable_device(pdev);
5862
Auke Kok9d5c8242008-01-24 02:22:38 -08005863 return 0;
5864}
5865
5866#ifdef CONFIG_PM
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00005867static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
5868{
5869 int retval;
5870 bool wake;
5871
5872 retval = __igb_shutdown(pdev, &wake);
5873 if (retval)
5874 return retval;
5875
5876 if (wake) {
5877 pci_prepare_to_sleep(pdev);
5878 } else {
5879 pci_wake_from_d3(pdev, false);
5880 pci_set_power_state(pdev, PCI_D3hot);
5881 }
5882
5883 return 0;
5884}
5885
Auke Kok9d5c8242008-01-24 02:22:38 -08005886static int igb_resume(struct pci_dev *pdev)
5887{
5888 struct net_device *netdev = pci_get_drvdata(pdev);
5889 struct igb_adapter *adapter = netdev_priv(netdev);
5890 struct e1000_hw *hw = &adapter->hw;
5891 u32 err;
5892
5893 pci_set_power_state(pdev, PCI_D0);
5894 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00005895 pci_save_state(pdev);
Taku Izumi42bfd332008-06-20 12:10:30 +09005896
Alexander Duyckaed5dec2009-02-06 23:16:04 +00005897 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08005898 if (err) {
5899 dev_err(&pdev->dev,
5900 "igb: Cannot enable PCI device from suspend\n");
5901 return err;
5902 }
5903 pci_set_master(pdev);
5904
5905 pci_enable_wake(pdev, PCI_D3hot, 0);
5906 pci_enable_wake(pdev, PCI_D3cold, 0);
5907
Alexander Duyck047e0032009-10-27 15:49:27 +00005908 if (igb_init_interrupt_scheme(adapter)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07005909 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5910 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08005911 }
5912
Auke Kok9d5c8242008-01-24 02:22:38 -08005913 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00005914
5915 /* let the f/w know that the h/w is now under the control of the
5916 * driver. */
5917 igb_get_hw_control(adapter);
5918
Auke Kok9d5c8242008-01-24 02:22:38 -08005919 wr32(E1000_WUS, ~0);
5920
Alexander Duycka88f10e2008-07-08 15:13:38 -07005921 if (netif_running(netdev)) {
5922 err = igb_open(netdev);
5923 if (err)
5924 return err;
5925 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005926
5927 netif_device_attach(netdev);
5928
Auke Kok9d5c8242008-01-24 02:22:38 -08005929 return 0;
5930}
5931#endif
5932
5933static void igb_shutdown(struct pci_dev *pdev)
5934{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00005935 bool wake;
5936
5937 __igb_shutdown(pdev, &wake);
5938
5939 if (system_state == SYSTEM_POWER_OFF) {
5940 pci_wake_from_d3(pdev, wake);
5941 pci_set_power_state(pdev, PCI_D3hot);
5942 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005943}
5944
5945#ifdef CONFIG_NET_POLL_CONTROLLER
5946/*
5947 * Polling 'interrupt' - used by things like netconsole to send skbs
5948 * without having to re-enable interrupts. It's not called while
5949 * the interrupt routine is executing.
5950 */
5951static void igb_netpoll(struct net_device *netdev)
5952{
5953 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00005954 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08005955 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08005956
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00005957 if (!adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00005958 struct igb_q_vector *q_vector = adapter->q_vector[0];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00005959 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00005960 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00005961 return;
5962 }
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005963
Alexander Duyck047e0032009-10-27 15:49:27 +00005964 for (i = 0; i < adapter->num_q_vectors; i++) {
5965 struct igb_q_vector *q_vector = adapter->q_vector[i];
5966 wr32(E1000_EIMC, q_vector->eims_value);
5967 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00005968 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005969}
5970#endif /* CONFIG_NET_POLL_CONTROLLER */
5971
5972/**
5973 * igb_io_error_detected - called when PCI error is detected
5974 * @pdev: Pointer to PCI device
5975 * @state: The current pci connection state
5976 *
5977 * This function is called after a PCI bus error affecting
5978 * this device has been detected.
5979 */
5980static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
5981 pci_channel_state_t state)
5982{
5983 struct net_device *netdev = pci_get_drvdata(pdev);
5984 struct igb_adapter *adapter = netdev_priv(netdev);
5985
5986 netif_device_detach(netdev);
5987
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00005988 if (state == pci_channel_io_perm_failure)
5989 return PCI_ERS_RESULT_DISCONNECT;
5990
Auke Kok9d5c8242008-01-24 02:22:38 -08005991 if (netif_running(netdev))
5992 igb_down(adapter);
5993 pci_disable_device(pdev);
5994
5995 /* Request a slot reset. */
5996 return PCI_ERS_RESULT_NEED_RESET;
5997}
5998
5999/**
6000 * igb_io_slot_reset - called after the pci bus has been reset.
6001 * @pdev: Pointer to PCI device
6002 *
6003 * Restart the card from scratch, as if from a cold-boot. Implementation
6004 * resembles the first-half of the igb_resume routine.
6005 */
6006static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6007{
6008 struct net_device *netdev = pci_get_drvdata(pdev);
6009 struct igb_adapter *adapter = netdev_priv(netdev);
6010 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08006011 pci_ers_result_t result;
Taku Izumi42bfd332008-06-20 12:10:30 +09006012 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006013
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006014 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006015 dev_err(&pdev->dev,
6016 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08006017 result = PCI_ERS_RESULT_DISCONNECT;
6018 } else {
6019 pci_set_master(pdev);
6020 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006021 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08006022
6023 pci_enable_wake(pdev, PCI_D3hot, 0);
6024 pci_enable_wake(pdev, PCI_D3cold, 0);
6025
6026 igb_reset(adapter);
6027 wr32(E1000_WUS, ~0);
6028 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08006029 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006030
Jeff Kirsherea943d42008-12-11 20:34:19 -08006031 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6032 if (err) {
6033 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6034 "failed 0x%0x\n", err);
6035 /* non-fatal, continue */
6036 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006037
Alexander Duyck40a914f2008-11-27 00:24:37 -08006038 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08006039}
6040
6041/**
6042 * igb_io_resume - called when traffic can start flowing again.
6043 * @pdev: Pointer to PCI device
6044 *
6045 * This callback is called when the error recovery driver tells us that
6046 * its OK to resume normal operation. Implementation resembles the
6047 * second-half of the igb_resume routine.
6048 */
6049static void igb_io_resume(struct pci_dev *pdev)
6050{
6051 struct net_device *netdev = pci_get_drvdata(pdev);
6052 struct igb_adapter *adapter = netdev_priv(netdev);
6053
Auke Kok9d5c8242008-01-24 02:22:38 -08006054 if (netif_running(netdev)) {
6055 if (igb_up(adapter)) {
6056 dev_err(&pdev->dev, "igb_up failed after reset\n");
6057 return;
6058 }
6059 }
6060
6061 netif_device_attach(netdev);
6062
6063 /* let the f/w know that the h/w is now under the control of the
6064 * driver. */
6065 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006066}
6067
Alexander Duyck26ad9172009-10-05 06:32:49 +00006068static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6069 u8 qsel)
6070{
6071 u32 rar_low, rar_high;
6072 struct e1000_hw *hw = &adapter->hw;
6073
6074 /* HW expects these in little endian so we reverse the byte order
6075 * from network order (big endian) to little endian
6076 */
6077 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6078 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6079 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6080
6081 /* Indicate to hardware the Address is Valid. */
6082 rar_high |= E1000_RAH_AV;
6083
6084 if (hw->mac.type == e1000_82575)
6085 rar_high |= E1000_RAH_POOL_1 * qsel;
6086 else
6087 rar_high |= E1000_RAH_POOL_1 << qsel;
6088
6089 wr32(E1000_RAL(index), rar_low);
6090 wrfl();
6091 wr32(E1000_RAH(index), rar_high);
6092 wrfl();
6093}
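
/*
 * Worked example (illustrative): for address 00:1b:21:aa:bb:cc the code
 * above packs rar_low = 0xaa211b00 and rar_high = 0xccbb (before the
 * valid and pool bits are OR'd in), i.e. addr[0] lands in the least
 * significant byte of RAL, matching the endianness comment.
 */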
6094
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006095static int igb_set_vf_mac(struct igb_adapter *adapter,
6096 int vf, unsigned char *mac_addr)
6097{
6098 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006099 /* VF MAC addresses start at the end of the receive addresses and move
6100 * towards the first, so a collision should not be possible */
6101 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006102
Alexander Duyck37680112009-02-19 20:40:30 -08006103 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006104
Alexander Duyck26ad9172009-10-05 06:32:49 +00006105 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006106
6107 return 0;
6108}
6109
Williams, Mitch A8151d292010-02-10 01:44:24 +00006110static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6111{
6112 struct igb_adapter *adapter = netdev_priv(netdev);
6113 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6114 return -EINVAL;
6115 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6116 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6117 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6118 " change effective.");
6119 if (test_bit(__IGB_DOWN, &adapter->state)) {
6120 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6121 " but the PF device is not up.\n");
6122 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6123 " attempting to use the VF device.\n");
6124 }
6125 return igb_set_vf_mac(adapter, vf, mac);
6126}
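
/*
 * Reached via rtnetlink; with an iproute2 recent enough to support VF
 * addressing (an assumption, version-dependent), the trigger is e.g.:
 *
 *	ip link set dev eth0 vf 0 mac 02:00:00:00:00:01
 */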
6127
6128static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6129{
6130 return -EOPNOTSUPP;
6131}
6132
6133static int igb_ndo_get_vf_config(struct net_device *netdev,
6134 int vf, struct ifla_vf_info *ivi)
6135{
6136 struct igb_adapter *adapter = netdev_priv(netdev);
6137 if (vf >= adapter->vfs_allocated_count)
6138 return -EINVAL;
6139 ivi->vf = vf;
6140 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
6141 ivi->tx_rate = 0;
6142 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6143 ivi->qos = adapter->vf_data[vf].pf_qos;
6144 return 0;
6145}
6146
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006147static void igb_vmm_control(struct igb_adapter *adapter)
6148{
6149 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00006150 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006151
Alexander Duyckd4960302009-10-27 15:53:45 +00006152 /* replication is not supported for 82575 */
6153 if (hw->mac.type == e1000_82575)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006154 return;
6155
Alexander Duyck10d8e902009-10-27 15:54:04 +00006156 /* enable replication vlan tag stripping */
6157 reg = rd32(E1000_RPLOLR);
6158 reg |= E1000_RPLOLR_STRVLAN;
6159 wr32(E1000_RPLOLR, reg);
6160
6161 /* notify HW that the MAC is adding vlan tags */
6162 reg = rd32(E1000_DTXCTL);
6163 reg |= E1000_DTXCTL_VLAN_ADDED;
6164 wr32(E1000_DTXCTL, reg);
6165
Alexander Duyckd4960302009-10-27 15:53:45 +00006166 if (adapter->vfs_allocated_count) {
6167 igb_vmdq_set_loopback_pf(hw, true);
6168 igb_vmdq_set_replication_pf(hw, true);
6169 } else {
6170 igb_vmdq_set_loopback_pf(hw, false);
6171 igb_vmdq_set_replication_pf(hw, false);
6172 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006173}
6174
Auke Kok9d5c8242008-01-24 02:22:38 -08006175/* igb_main.c */