/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = igb_suspend,
	.resume = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
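/*
 * Illustrative expansion of the macro above: Q_IDX_82576(0) = 0,
 * Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, ...
 * i.e. consecutive software queue indices alternate between the low (0-7)
 * and high (8-15) halves of the hardware queue space.
 */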
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
			for (; j < adapter->rss_queues; j++)
				adapter->tx_ring[j]->reg_idx = rbase_offset +
							       Q_IDX_82576(j);
		}
	case e1000_82575:
	case e1000_82580:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
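		/*
		 * Sketch of the IVAR0 entry layout implied by the shifts
		 * below (each 32-bit entry serves queues N and N+8):
		 *   byte 0: RX queue N      byte 1: TX queue N
		 *   byte 2: RX queue N+8    byte 3: TX queue N+8
		 */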
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
		/* 82580 uses the same table-based approach as 82576 but has
		   fewer entries; as a result we carry over for queues greater
		   than 4. */
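		/*
		 * Sketch of the 82580 IVAR0 entry layout implied by the
		 * shifts below (entry index = queue >> 1):
		 *   byte 0: RX even queue   byte 1: TX even queue
		 *   byte 2: RX odd queue    byte 3: TX odd queue
		 */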
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
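	/*
	 * Illustrative count (assumed configuration): with 4 RX queues and
	 * unpaired TX handlers this requests 4 (rx) + 4 (tx) + 1 (link)
	 * = 9 MSI-X vectors.
	 */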
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
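/*
 * Illustrative outcome of the mapping above (assumed queue counts): with
 * 4 RX and 4 TX queues and 8 q_vectors every queue gets its own vector;
 * with only 4 q_vectors, RX queue i and TX queue i share q_vector i.
 */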

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
				 q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
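	/* (otherwise a completely full ring would be indistinguishable
	 * from an empty one, as both would have next_to_use == next_to_clean) */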
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset */
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
1299 /* software strips receive CRC, so leave room for it */
1300 min_rx_space = adapter->max_frame_size;
1301 min_rx_space = ALIGN(min_rx_space, 1024);
1302 min_rx_space >>= 10;
1303
1304 /* If current Tx allocation is less than the min Tx FIFO size,
1305 * and the min Tx FIFO size is less than the current Rx FIFO
1306 * allocation, take space away from current Rx allocation */
1307 if (tx_space < min_tx_space &&
1308 ((min_tx_space - tx_space) < pba)) {
1309 pba = pba - (min_tx_space - tx_space);
1310
1311 /* if short on rx space, rx wins and must trump tx
1312 * adjustment */
1313 if (pba < min_rx_space)
1314 pba = min_rx_space;
1315 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001316 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001317 }
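	/*
	 * Illustrative arithmetic for the block above (assuming a
	 * 9000-byte MTU, so max_frame_size = 9018, and 16-byte advanced
	 * Tx descriptors):
	 *   min_tx_space = (9018 + 16 - 4) * 2 = 18060
	 *                  -> ALIGN(18060, 1024) = 18432 -> 18 KB
	 *   min_rx_space = ALIGN(9018, 1024) = 9216 -> 9 KB
	 * so up to (18 - tx_space) KB is moved from the Rx to the Tx
	 * allocation, provided at least 9 KB of Rx space remains.
	 */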
Auke Kok9d5c8242008-01-24 02:22:38 -08001318
1319 /* flow control settings */
1320 /* The high water mark must be low enough to fit one full frame
1321 * (or the size used for early receive) above it in the Rx FIFO.
1322 * Set it to the lower of:
1323 * - 90% of the Rx FIFO size, or
1324 * - the full Rx FIFO size minus one full frame */
1325 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001326 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001327
Alexander Duyckd405ea32009-12-23 13:21:27 +00001328 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1329 fc->low_water = fc->high_water - 16;
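	/*
	 * For illustration (assuming pba = 34 KB and a 1518-byte max
	 * frame): hwm = min(34816 * 9 / 10, 34816 - 2 * 1518)
	 *             = min(31334, 31780) = 31334,
	 * giving high_water = 31334 & 0xFFF0 = 31328 and low_water = 31312.
	 */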
Auke Kok9d5c8242008-01-24 02:22:38 -08001330 fc->pause_time = 0xFFFF;
1331 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001332 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001333
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001334 /* disable receive for all VFs and wait one second */
1335 if (adapter->vfs_allocated_count) {
1336 int i;
1337 for (i = 0; i < adapter->vfs_allocated_count; i++)
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001338 adapter->vf_data[i].flags = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001339
1340 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001341 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001342
1343 /* disable transmits and receives */
1344 wr32(E1000_VFRE, 0);
1345 wr32(E1000_VFTE, 0);
1346 }
1347
Auke Kok9d5c8242008-01-24 02:22:38 -08001348 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001349 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001350 wr32(E1000_WUC, 0);
1351
Alexander Duyck330a6d62009-10-27 23:51:35 +00001352 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001353 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001354
Alexander Duyck55cac242009-11-19 12:42:21 +00001355 if (hw->mac.type == e1000_82580) {
1356 u32 reg = rd32(E1000_PCIEMISC);
1357 wr32(E1000_PCIEMISC,
1358 reg & ~E1000_PCIEMISC_LX_DECISION);
1359 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001360 if (!netif_running(adapter->netdev))
1361 igb_power_down_link(adapter);
1362
Auke Kok9d5c8242008-01-24 02:22:38 -08001363 igb_update_mng_vlan(adapter);
1364
1365 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1366 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1367
Alexander Duyck330a6d62009-10-27 23:51:35 +00001368 igb_reset_adaptive(hw);
1369 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001370}
1371
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001372static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001373 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001374 .ndo_stop = igb_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08001375 .ndo_start_xmit = igb_xmit_frame_adv,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001376 .ndo_get_stats = igb_get_stats,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001377 .ndo_set_rx_mode = igb_set_rx_mode,
1378 .ndo_set_multicast_list = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001379 .ndo_set_mac_address = igb_set_mac,
1380 .ndo_change_mtu = igb_change_mtu,
1381 .ndo_do_ioctl = igb_ioctl,
1382 .ndo_tx_timeout = igb_tx_timeout,
1383 .ndo_validate_addr = eth_validate_addr,
1384 .ndo_vlan_rx_register = igb_vlan_rx_register,
1385 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1386 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001387 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1388 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1389 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1390 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001391#ifdef CONFIG_NET_POLL_CONTROLLER
1392 .ndo_poll_controller = igb_netpoll,
1393#endif
1394};
1395
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001396/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001397 * igb_probe - Device Initialization Routine
1398 * @pdev: PCI device information struct
1399 * @ent: entry in igb_pci_tbl
1400 *
1401 * Returns 0 on success, negative on failure
1402 *
1403 * igb_probe initializes an adapter identified by a pci_dev structure.
1404 * The OS initialization, configuring of the adapter private structure,
1405 * and a hardware reset occur.
1406 **/
1407static int __devinit igb_probe(struct pci_dev *pdev,
1408 const struct pci_device_id *ent)
1409{
1410 struct net_device *netdev;
1411 struct igb_adapter *adapter;
1412 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001413 u16 eeprom_data = 0;
1414 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001415 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1416 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001417 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001418 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1419 u32 part_num;
1420
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001421 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001422 if (err)
1423 return err;
1424
1425 pci_using_dac = 0;
Yang Hongyang6a355282009-04-06 19:01:13 -07001426 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001427 if (!err) {
Yang Hongyang6a355282009-04-06 19:01:13 -07001428 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001429 if (!err)
1430 pci_using_dac = 1;
1431 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07001432 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001433 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07001434 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001435 if (err) {
1436 dev_err(&pdev->dev, "No usable DMA "
1437 "configuration, aborting\n");
1438 goto err_dma;
1439 }
1440 }
1441 }
1442
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001443 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1444 IORESOURCE_MEM),
1445 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001446 if (err)
1447 goto err_pci_reg;
1448
Frans Pop19d5afd2009-10-02 10:04:12 -07001449 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001450
Auke Kok9d5c8242008-01-24 02:22:38 -08001451 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001452 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001453
1454 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001455 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1456 IGB_ABS_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001457 if (!netdev)
1458 goto err_alloc_etherdev;
1459
1460 SET_NETDEV_DEV(netdev, &pdev->dev);
1461
1462 pci_set_drvdata(pdev, netdev);
1463 adapter = netdev_priv(netdev);
1464 adapter->netdev = netdev;
1465 adapter->pdev = pdev;
1466 hw = &adapter->hw;
1467 hw->back = adapter;
1468 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1469
1470 mmio_start = pci_resource_start(pdev, 0);
1471 mmio_len = pci_resource_len(pdev, 0);
1472
1473 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001474 hw->hw_addr = ioremap(mmio_start, mmio_len);
1475 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001476 goto err_ioremap;
1477
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001478 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001479 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001480 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001481
1482 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1483
1484 netdev->mem_start = mmio_start;
1485 netdev->mem_end = mmio_start + mmio_len;
1486
Auke Kok9d5c8242008-01-24 02:22:38 -08001487 /* PCI config space info */
1488 hw->vendor_id = pdev->vendor;
1489 hw->device_id = pdev->device;
1490 hw->revision_id = pdev->revision;
1491 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1492 hw->subsystem_device_id = pdev->subsystem_device;
1493
Auke Kok9d5c8242008-01-24 02:22:38 -08001494 /* Copy the default MAC, PHY and NVM function pointers */
1495 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1496 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1497 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1498 /* Initialize skew-specific constants */
1499 err = ei->get_invariants(hw);
1500 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001501 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001502
Alexander Duyck450c87c2009-02-06 23:22:11 +00001503 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001504 err = igb_sw_init(adapter);
1505 if (err)
1506 goto err_sw_init;
1507
1508 igb_get_bus_info_pcie(hw);
1509
1510 hw->phy.autoneg_wait_to_complete = false;
1511 hw->mac.adaptive_ifs = true;
1512
1513 /* Copper options */
1514 if (hw->phy.media_type == e1000_media_type_copper) {
1515 hw->phy.mdix = AUTO_ALL_MODES;
1516 hw->phy.disable_polarity_correction = false;
1517 hw->phy.ms_type = e1000_ms_hw_default;
1518 }
1519
1520 if (igb_check_reset_block(hw))
1521 dev_info(&pdev->dev,
1522 "PHY reset is blocked due to SOL/IDER session.\n");
1523
1524 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001525 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001526 NETIF_F_HW_VLAN_TX |
1527 NETIF_F_HW_VLAN_RX |
1528 NETIF_F_HW_VLAN_FILTER;
1529
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001530 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001531 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001532 netdev->features |= NETIF_F_TSO6;
Herbert Xu5c0999b2009-01-19 15:20:57 -08001533 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001534
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001535 netdev->vlan_features |= NETIF_F_TSO;
1536 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001537 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001538 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001539 netdev->vlan_features |= NETIF_F_SG;
1540
Auke Kok9d5c8242008-01-24 02:22:38 -08001541 if (pci_using_dac)
1542 netdev->features |= NETIF_F_HIGHDMA;
1543
Alexander Duyck5b043fb2009-10-27 23:52:31 +00001544 if (hw->mac.type >= e1000_82576)
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001545 netdev->features |= NETIF_F_SCTP_CSUM;
1546
Alexander Duyck330a6d62009-10-27 23:51:35 +00001547 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001548
1549 /* before reading the NVM, reset the controller to put the device in a
1550 * known good starting state */
1551 hw->mac.ops.reset_hw(hw);
1552
1553 /* make sure the NVM is good */
1554 if (igb_validate_nvm_checksum(hw) < 0) {
1555 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1556 err = -EIO;
1557 goto err_eeprom;
1558 }
1559
1560 /* copy the MAC address out of the NVM */
1561 if (hw->mac.ops.read_mac_addr(hw))
1562 dev_err(&pdev->dev, "NVM Read Error\n");
1563
1564 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1565 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1566
1567 if (!is_valid_ether_addr(netdev->perm_addr)) {
1568 dev_err(&pdev->dev, "Invalid MAC Address\n");
1569 err = -EIO;
1570 goto err_eeprom;
1571 }
1572
Alexander Duyck0e340482009-03-20 00:17:08 +00001573 setup_timer(&adapter->watchdog_timer, &igb_watchdog,
1574 (unsigned long) adapter);
1575 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
1576 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001577
1578 INIT_WORK(&adapter->reset_task, igb_reset_task);
1579 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1580
Alexander Duyck450c87c2009-02-06 23:22:11 +00001581 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001582 adapter->fc_autoneg = true;
1583 hw->mac.autoneg = true;
1584 hw->phy.autoneg_advertised = 0x2f;
1585
Alexander Duyck0cce1192009-07-23 18:10:24 +00001586 hw->fc.requested_mode = e1000_fc_default;
1587 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001588
Auke Kok9d5c8242008-01-24 02:22:38 -08001589 igb_validate_mdi_setting(hw);
1590
Auke Kok9d5c8242008-01-24 02:22:38 -08001591 /* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
1592 * enable the ACPI Magic Packet filter
1593 */
1594
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001595 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001596 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duyck55cac242009-11-19 12:42:21 +00001597 else if (hw->mac.type == e1000_82580)
1598 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1599 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1600 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001601 else if (hw->bus.func == 1)
1602 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001603
1604 if (eeprom_data & eeprom_apme_mask)
1605 adapter->eeprom_wol |= E1000_WUFC_MAG;
1606
1607 /* now that we have the eeprom settings, apply the special cases where
1608 * the eeprom may be wrong or the board simply won't support wake on
1609 * lan on a particular port */
1610 switch (pdev->device) {
1611 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1612 adapter->eeprom_wol = 0;
1613 break;
1614 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07001615 case E1000_DEV_ID_82576_FIBER:
1616 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08001617 /* Wake events only supported on port A for dual fiber
1618 * regardless of eeprom setting */
1619 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1620 adapter->eeprom_wol = 0;
1621 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00001622 case E1000_DEV_ID_82576_QUAD_COPPER:
1623 /* if quad port adapter, disable WoL on all but port A */
1624 if (global_quad_port_a != 0)
1625 adapter->eeprom_wol = 0;
1626 else
1627 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1628 /* Reset for multiple quad port adapters */
1629 if (++global_quad_port_a == 4)
1630 global_quad_port_a = 0;
1631 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08001632 }
1633
1634 /* initialize the wol settings based on the eeprom settings */
1635 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00001636 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08001637
1638 /* reset the hardware with the new settings */
1639 igb_reset(adapter);
1640
1641 /* let the f/w know that the h/w is now under the control of the
1642 * driver. */
1643 igb_get_hw_control(adapter);
1644
Auke Kok9d5c8242008-01-24 02:22:38 -08001645 strcpy(netdev->name, "eth%d");
1646 err = register_netdev(netdev);
1647 if (err)
1648 goto err_register;
1649
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00001650 /* carrier off reporting is important to ethtool even BEFORE open */
1651 netif_carrier_off(netdev);
1652
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001653#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08001654 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001655 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001656 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001657 igb_setup_dca(adapter);
1658 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00001659
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001660#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001661 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1662 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07001663 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001664 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00001665 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1666 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00001667 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1668 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1669 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
1670 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07001671 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08001672
1673 igb_read_part_num(hw, &part_num);
1674 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1675 (part_num >> 8), (part_num & 0xff));
1676
1677 dev_info(&pdev->dev,
1678 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1679 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001680 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08001681 adapter->num_rx_queues, adapter->num_tx_queues);
1682
Auke Kok9d5c8242008-01-24 02:22:38 -08001683 return 0;
1684
1685err_register:
1686 igb_release_hw_control(adapter);
1687err_eeprom:
1688 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08001689 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001690
1691 if (hw->flash_address)
1692 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08001693err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00001694 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001695 iounmap(hw->hw_addr);
1696err_ioremap:
1697 free_netdev(netdev);
1698err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00001699 pci_release_selected_regions(pdev,
1700 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001701err_pci_reg:
1702err_dma:
1703 pci_disable_device(pdev);
1704 return err;
1705}
1706
1707/**
1708 * igb_remove - Device Removal Routine
1709 * @pdev: PCI device information struct
1710 *
1711 * igb_remove is called by the PCI subsystem to alert the driver
1712 * that it should release a PCI device. This could be caused by a
1713 * Hot-Plug event, or because the driver is going to be removed from
1714 * memory.
1715 **/
1716static void __devexit igb_remove(struct pci_dev *pdev)
1717{
1718 struct net_device *netdev = pci_get_drvdata(pdev);
1719 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001720 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001721
1722 /* flush_scheduled_work() may reschedule our watchdog task, so
1723 * explicitly disable watchdog tasks from being rescheduled */
1724 set_bit(__IGB_DOWN, &adapter->state);
1725 del_timer_sync(&adapter->watchdog_timer);
1726 del_timer_sync(&adapter->phy_info_timer);
1727
1728 flush_scheduled_work();
1729
Jeff Kirsher421e02f2008-10-17 11:08:31 -07001730#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001731 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001732 dev_info(&pdev->dev, "DCA disabled\n");
1733 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001734 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08001735 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07001736 }
1737#endif
1738
Auke Kok9d5c8242008-01-24 02:22:38 -08001739 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1740 * would have already happened in close and is redundant. */
1741 igb_release_hw_control(adapter);
1742
1743 unregister_netdev(netdev);
1744
Alexander Duyck047e0032009-10-27 15:49:27 +00001745 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001746
Alexander Duyck37680112009-02-19 20:40:30 -08001747#ifdef CONFIG_PCI_IOV
1748 /* reclaim resources allocated to VFs */
1749 if (adapter->vf_data) {
1750 /* disable iov and allow time for transactions to clear */
1751 pci_disable_sriov(pdev);
1752 msleep(500);
1753
1754 kfree(adapter->vf_data);
1755 adapter->vf_data = NULL;
1756 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1757 msleep(100);
1758 dev_info(&pdev->dev, "IOV Disabled\n");
1759 }
1760#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00001761
Alexander Duyck28b07592009-02-06 23:20:31 +00001762 iounmap(hw->hw_addr);
1763 if (hw->flash_address)
1764 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00001765 pci_release_selected_regions(pdev,
1766 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08001767
1768 free_netdev(netdev);
1769
Frans Pop19d5afd2009-10-02 10:04:12 -07001770 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001771
Auke Kok9d5c8242008-01-24 02:22:38 -08001772 pci_disable_device(pdev);
1773}
1774
1775/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00001776 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1777 * @adapter: board private structure to initialize
1778 *
1779 * This function initializes the vf specific data storage and then attempts to
1780 * allocate the VFs. The reason for ordering it this way is because it is much
1781 * mor expensive time wise to disable SR-IOV than it is to allocate and free
1782 * the memory for the VFs.
1783 **/
1784static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
1785{
1786#ifdef CONFIG_PCI_IOV
1787 struct pci_dev *pdev = adapter->pdev;
1788
1789 if (adapter->vfs_allocated_count > 7)
1790 adapter->vfs_allocated_count = 7;
1791
1792 if (adapter->vfs_allocated_count) {
1793 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1794 sizeof(struct vf_data_storage),
1795 GFP_KERNEL);
1796 /* if allocation failed then we do not support SR-IOV */
1797 if (!adapter->vf_data) {
1798 adapter->vfs_allocated_count = 0;
1799 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1800 "Data Storage\n");
1801 }
1802 }
1803
1804 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1805 kfree(adapter->vf_data);
1806 adapter->vf_data = NULL;
1807#endif /* CONFIG_PCI_IOV */
1808 adapter->vfs_allocated_count = 0;
1809#ifdef CONFIG_PCI_IOV
1810 } else {
1811 unsigned char mac_addr[ETH_ALEN];
1812 int i;
1813 dev_info(&pdev->dev, "%d vfs allocated\n",
1814 adapter->vfs_allocated_count);
1815 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1816 random_ether_addr(mac_addr);
1817 igb_set_vf_mac(adapter, i, mac_addr);
1818 }
1819 }
1820#endif /* CONFIG_PCI_IOV */
1821}
1822
Alexander Duyck115f4592009-11-12 18:37:00 +00001823
1824/**
1825 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1826 * @adapter: board private structure to initialize
1827 *
1828 * igb_init_hw_timer initializes the function pointers and values for the
1829 * timer found in hardware.
1830 **/
1831static void igb_init_hw_timer(struct igb_adapter *adapter)
1832{
1833 struct e1000_hw *hw = &adapter->hw;
1834
1835 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00001836 case e1000_82580:
1837 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1838 adapter->cycles.read = igb_read_clock;
1839 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1840 adapter->cycles.mult = 1;
1841 /*
1842 * The 82580 timesync advances the system timer by 8 ns every 8 ns,
1843 * and the value cannot be shifted. Instead we need to shift
1844 * the registers to generate a 64bit timer value. As a result
1845 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
1846 * 24 in order to generate a larger value for synchronization.
1847 */
1848 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
1849 /* disable system timer temporarily by setting bit 31 */
1850 wr32(E1000_TSAUXC, 0x80000000);
1851 wrfl();
1852
1853 /* Set registers so that rollover occurs soon to test this. */
1854 wr32(E1000_SYSTIMR, 0x00000000);
1855 wr32(E1000_SYSTIML, 0x80000000);
1856 wr32(E1000_SYSTIMH, 0x000000FF);
1857 wrfl();
1858
1859 /* enable system timer by clearing bit 31 */
1860 wr32(E1000_TSAUXC, 0x0);
1861 wrfl();
1862
1863 timecounter_init(&adapter->clock,
1864 &adapter->cycles,
1865 ktime_to_ns(ktime_get_real()));
1866 /*
1867 * Synchronize our NIC clock against system wall clock. NIC
1868 * time stamp reading requires ~3us per sample, each sample
1869 * was pretty stable even under load => only require 10
1870 * samples for each offset comparison.
1871 */
1872 memset(&adapter->compare, 0, sizeof(adapter->compare));
1873 adapter->compare.source = &adapter->clock;
1874 adapter->compare.target = ktime_get_real;
1875 adapter->compare.num_samples = 10;
1876 timecompare_update(&adapter->compare, 0);
1877 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00001878 case e1000_82576:
1879 /*
1880 * Initialize hardware timer: we keep it running just in case
1881 * that some program needs it later on.
1882 */
1883 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1884 adapter->cycles.read = igb_read_clock;
1885 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1886 adapter->cycles.mult = 1;
1887 /*
1888 * Scale the NIC clock cycle by a large factor so that
1889 * relatively small clock corrections can be added or
1890 * subtracted at each clock tick. The drawbacks of a large
1891 * factor are a) that the clock register overflows more quickly
1892 * (not such a big deal) and b) that the increment per tick has
1893 * to fit into 24 bits. As a result we need to use a shift of
1894 * 19 so we can fit a value of 16 into the TIMINCA register.
1895 */
1896 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1897 wr32(E1000_TIMINCA,
1898 (1 << E1000_TIMINCA_16NS_SHIFT) |
1899 (16 << IGB_82576_TSYNC_SHIFT));
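		/*
		 * Quick check on the shift chosen above: the per-tick
		 * increment 16 << 19 = 0x800000 still fits in TIMINCA's
		 * 24-bit increment field, as the comment requires.
		 */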
1900
1901 /* Set registers so that rollover occurs soon to test this. */
1902 wr32(E1000_SYSTIML, 0x00000000);
1903 wr32(E1000_SYSTIMH, 0xFF800000);
1904 wrfl();
1905
1906 timecounter_init(&adapter->clock,
1907 &adapter->cycles,
1908 ktime_to_ns(ktime_get_real()));
1909 /*
1910 * Synchronize our NIC clock against system wall clock. NIC
1911 * time stamp reading requires ~3us per sample, each sample
1912 * was pretty stable even under load => only require 10
1913 * samples for each offset comparison.
1914 */
1915 memset(&adapter->compare, 0, sizeof(adapter->compare));
1916 adapter->compare.source = &adapter->clock;
1917 adapter->compare.target = ktime_get_real;
1918 adapter->compare.num_samples = 10;
1919 timecompare_update(&adapter->compare, 0);
1920 break;
1921 case e1000_82575:
1922 /* 82575 does not support timesync */
1923 default:
1924 break;
1925 }
1926
1927}
1928
Alexander Duycka6b623e2009-10-27 23:47:53 +00001929/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001930 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1931 * @adapter: board private structure to initialize
1932 *
1933 * igb_sw_init initializes the Adapter private data structure.
1934 * Fields are initialized based on PCI device information and
1935 * OS network device settings (MTU size).
1936 **/
1937static int __devinit igb_sw_init(struct igb_adapter *adapter)
1938{
1939 struct e1000_hw *hw = &adapter->hw;
1940 struct net_device *netdev = adapter->netdev;
1941 struct pci_dev *pdev = adapter->pdev;
1942
1943 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1944
Alexander Duyck68fd9912008-11-20 00:48:10 -08001945 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1946 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001947 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1948 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1949
Auke Kok9d5c8242008-01-24 02:22:38 -08001950 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1951 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1952
Alexander Duycka6b623e2009-10-27 23:47:53 +00001953#ifdef CONFIG_PCI_IOV
1954 if (hw->mac.type == e1000_82576)
1955 adapter->vfs_allocated_count = max_vfs;
1956
1957#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00001958 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1959
1960 /*
1961 * if rss_queues > 4, or if VFs are being allocated alongside multiple
1962 * RSS queues, combine the queues into queue pairs in order to
1963 * conserve interrupts due to the limited supply
1964 */
1965 if ((adapter->rss_queues > 4) ||
1966 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1967 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
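	/*
	 * Rough illustration (assuming eight online CPUs and no VFs):
	 * rss_queues = 8 > 4, so each q_vector services a Tx/Rx ring
	 * pair instead of a single ring, halving the vectors requested.
	 */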
1968
Alexander Duycka6b623e2009-10-27 23:47:53 +00001969 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00001970 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001971 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1972 return -ENOMEM;
1973 }
1974
Alexander Duyck115f4592009-11-12 18:37:00 +00001975 igb_init_hw_timer(adapter);
Alexander Duycka6b623e2009-10-27 23:47:53 +00001976 igb_probe_vfs(adapter);
1977
Auke Kok9d5c8242008-01-24 02:22:38 -08001978 /* Explicitly disable IRQ since the NIC can be in any state. */
1979 igb_irq_disable(adapter);
1980
1981 set_bit(__IGB_DOWN, &adapter->state);
1982 return 0;
1983}
1984
1985/**
1986 * igb_open - Called when a network interface is made active
1987 * @netdev: network interface device structure
1988 *
1989 * Returns 0 on success, negative value on failure
1990 *
1991 * The open entry point is called when a network interface is made
1992 * active by the system (IFF_UP). At this point all resources needed
1993 * for transmit and receive operations are allocated, the interrupt
1994 * handler is registered with the OS, the watchdog timer is started,
1995 * and the stack is notified that the interface is ready.
1996 **/
1997static int igb_open(struct net_device *netdev)
1998{
1999 struct igb_adapter *adapter = netdev_priv(netdev);
2000 struct e1000_hw *hw = &adapter->hw;
2001 int err;
2002 int i;
2003
2004 /* disallow open during test */
2005 if (test_bit(__IGB_TESTING, &adapter->state))
2006 return -EBUSY;
2007
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002008 netif_carrier_off(netdev);
2009
Auke Kok9d5c8242008-01-24 02:22:38 -08002010 /* allocate transmit descriptors */
2011 err = igb_setup_all_tx_resources(adapter);
2012 if (err)
2013 goto err_setup_tx;
2014
2015 /* allocate receive descriptors */
2016 err = igb_setup_all_rx_resources(adapter);
2017 if (err)
2018 goto err_setup_rx;
2019
Nick Nunley88a268c2010-02-17 01:01:59 +00002020 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002021
Auke Kok9d5c8242008-01-24 02:22:38 -08002022 /* before we allocate an interrupt, we must be ready to handle it.
2023 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2024 * as soon as we call request_irq, so we have to set up our
2025 * clean_rx handler before we do so. */
2026 igb_configure(adapter);
2027
2028 err = igb_request_irq(adapter);
2029 if (err)
2030 goto err_req_irq;
2031
2032 /* From here on the code is the same as igb_up() */
2033 clear_bit(__IGB_DOWN, &adapter->state);
2034
Alexander Duyck047e0032009-10-27 15:49:27 +00002035 for (i = 0; i < adapter->num_q_vectors; i++) {
2036 struct igb_q_vector *q_vector = adapter->q_vector[i];
2037 napi_enable(&q_vector->napi);
2038 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002039
2040 /* Clear any pending interrupts. */
2041 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002042
2043 igb_irq_enable(adapter);
2044
Alexander Duyckd4960302009-10-27 15:53:45 +00002045 /* notify VFs that reset has been completed */
2046 if (adapter->vfs_allocated_count) {
2047 u32 reg_data = rd32(E1000_CTRL_EXT);
2048 reg_data |= E1000_CTRL_EXT_PFRSTD;
2049 wr32(E1000_CTRL_EXT, reg_data);
2050 }
2051
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002052 netif_tx_start_all_queues(netdev);
2053
Alexander Duyck25568a52009-10-27 23:49:59 +00002054 /* start the watchdog. */
2055 hw->mac.get_link_status = 1;
2056 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002057
2058 return 0;
2059
2060err_req_irq:
2061 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002062 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002063 igb_free_all_rx_resources(adapter);
2064err_setup_rx:
2065 igb_free_all_tx_resources(adapter);
2066err_setup_tx:
2067 igb_reset(adapter);
2068
2069 return err;
2070}
2071
2072/**
2073 * igb_close - Disables a network interface
2074 * @netdev: network interface device structure
2075 *
2076 * Returns 0, this is not allowed to fail
2077 *
2078 * The close entry point is called when an interface is de-activated
2079 * by the OS. The hardware is still under the driver's control, but
2080 * needs to be disabled. A global MAC reset is issued to stop the
2081 * hardware, and all transmit and receive resources are freed.
2082 **/
2083static int igb_close(struct net_device *netdev)
2084{
2085 struct igb_adapter *adapter = netdev_priv(netdev);
2086
2087 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2088 igb_down(adapter);
2089
2090 igb_free_irq(adapter);
2091
2092 igb_free_all_tx_resources(adapter);
2093 igb_free_all_rx_resources(adapter);
2094
Auke Kok9d5c8242008-01-24 02:22:38 -08002095 return 0;
2096}
2097
2098/**
2099 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002100 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2101 *
2102 * Return 0 on success, negative on failure
2103 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002104int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002105{
Alexander Duyck80785292009-10-27 15:51:47 +00002106 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002107 int size;
2108
2109 size = sizeof(struct igb_buffer) * tx_ring->count;
2110 tx_ring->buffer_info = vmalloc(size);
2111 if (!tx_ring->buffer_info)
2112 goto err;
2113 memset(tx_ring->buffer_info, 0, size);
2114
2115 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002116 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002117 tx_ring->size = ALIGN(tx_ring->size, 4096);
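	/*
	 * e.g. with the default of 256 descriptors (an assumption; see
	 * IGB_DEFAULT_TXD) the ring is 256 * 16 = 4096 bytes, already
	 * 4K aligned; other counts round up to the next 4K multiple.
	 */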
2118
Alexander Duyck439705e2009-10-27 23:49:20 +00002119 tx_ring->desc = pci_alloc_consistent(pdev,
2120 tx_ring->size,
Auke Kok9d5c8242008-01-24 02:22:38 -08002121 &tx_ring->dma);
2122
2123 if (!tx_ring->desc)
2124 goto err;
2125
Auke Kok9d5c8242008-01-24 02:22:38 -08002126 tx_ring->next_to_use = 0;
2127 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002128 return 0;
2129
2130err:
2131 vfree(tx_ring->buffer_info);
Alexander Duyck047e0032009-10-27 15:49:27 +00002132 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002133 "Unable to allocate memory for the transmit descriptor ring\n");
2134 return -ENOMEM;
2135}
2136
2137/**
2138 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2139 * (Descriptors) for all queues
2140 * @adapter: board private structure
2141 *
2142 * Return 0 on success, negative on failure
2143 **/
2144static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2145{
Alexander Duyck439705e2009-10-27 23:49:20 +00002146 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002147 int i, err = 0;
2148
2149 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002150 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002151 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002152 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002153 "Allocation for Tx Queue %u failed\n", i);
2154 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002155 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002156 break;
2157 }
2158 }
2159
Alexander Duycka99955f2009-11-12 18:37:19 +00002160 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002161 int r_idx = i % adapter->num_tx_queues;
Alexander Duyck3025a442010-02-17 01:02:39 +00002162 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002163 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002164 return err;
2165}
2166
2167/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002168 * igb_setup_tctl - configure the transmit control registers
2169 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002170 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002171void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002172{
Auke Kok9d5c8242008-01-24 02:22:38 -08002173 struct e1000_hw *hw = &adapter->hw;
2174 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002175
Alexander Duyck85b430b2009-10-27 15:50:29 +00002176 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2177 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002178
2179 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002180 tctl = rd32(E1000_TCTL);
2181 tctl &= ~E1000_TCTL_CT;
2182 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2183 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2184
2185 igb_config_collision_dist(hw);
2186
Auke Kok9d5c8242008-01-24 02:22:38 -08002187 /* Enable transmits */
2188 tctl |= E1000_TCTL_EN;
2189
2190 wr32(E1000_TCTL, tctl);
2191}
2192
2193/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002194 * igb_configure_tx_ring - Configure transmit ring after Reset
2195 * @adapter: board private structure
2196 * @ring: tx ring to configure
2197 *
2198 * Configure a transmit ring after a reset.
2199 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002200void igb_configure_tx_ring(struct igb_adapter *adapter,
2201 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002202{
2203 struct e1000_hw *hw = &adapter->hw;
2204 u32 txdctl;
2205 u64 tdba = ring->dma;
2206 int reg_idx = ring->reg_idx;
2207
2208 /* disable the queue */
2209 txdctl = rd32(E1000_TXDCTL(reg_idx));
2210 wr32(E1000_TXDCTL(reg_idx),
2211 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2212 wrfl();
2213 mdelay(10);
2214
2215 wr32(E1000_TDLEN(reg_idx),
2216 ring->count * sizeof(union e1000_adv_tx_desc));
2217 wr32(E1000_TDBAL(reg_idx),
2218 tdba & 0x00000000ffffffffULL);
2219 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2220
Alexander Duyckfce99e32009-10-27 15:51:27 +00002221 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2222 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2223 writel(0, ring->head);
2224 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002225
2226 txdctl |= IGB_TX_PTHRESH;
2227 txdctl |= IGB_TX_HTHRESH << 8;
2228 txdctl |= IGB_TX_WTHRESH << 16;
2229
2230 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2231 wr32(E1000_TXDCTL(reg_idx), txdctl);
2232}
2233
2234/**
2235 * igb_configure_tx - Configure transmit Unit after Reset
2236 * @adapter: board private structure
2237 *
2238 * Configure the Tx unit of the MAC after a reset.
2239 **/
2240static void igb_configure_tx(struct igb_adapter *adapter)
2241{
2242 int i;
2243
2244 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002245 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002246}
2247
2248/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002249 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002250 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2251 *
2252 * Returns 0 on success, negative on failure
2253 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002254int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002255{
Alexander Duyck80785292009-10-27 15:51:47 +00002256 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002257 int size, desc_len;
2258
2259 size = sizeof(struct igb_buffer) * rx_ring->count;
2260 rx_ring->buffer_info = vmalloc(size);
2261 if (!rx_ring->buffer_info)
2262 goto err;
2263 memset(rx_ring->buffer_info, 0, size);
2264
2265 desc_len = sizeof(union e1000_adv_rx_desc);
2266
2267 /* Round up to nearest 4K */
2268 rx_ring->size = rx_ring->count * desc_len;
2269 rx_ring->size = ALIGN(rx_ring->size, 4096);
2270
2271 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2272 &rx_ring->dma);
2273
2274 if (!rx_ring->desc)
2275 goto err;
2276
2277 rx_ring->next_to_clean = 0;
2278 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002279
Auke Kok9d5c8242008-01-24 02:22:38 -08002280 return 0;
2281
2282err:
2283 vfree(rx_ring->buffer_info);
Alexander Duyck439705e2009-10-27 23:49:20 +00002284 rx_ring->buffer_info = NULL;
Alexander Duyck80785292009-10-27 15:51:47 +00002285 dev_err(&pdev->dev, "Unable to allocate memory for "
Auke Kok9d5c8242008-01-24 02:22:38 -08002286 "the receive descriptor ring\n");
2287 return -ENOMEM;
2288}
2289
2290/**
2291 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2292 * (Descriptors) for all queues
2293 * @adapter: board private structure
2294 *
2295 * Return 0 on success, negative on failure
2296 **/
2297static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2298{
Alexander Duyck439705e2009-10-27 23:49:20 +00002299 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002300 int i, err = 0;
2301
2302 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002303 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002304 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002305 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002306 "Allocation for Rx Queue %u failed\n", i);
2307 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002308 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002309 break;
2310 }
2311 }
2312
2313 return err;
2314}
2315
2316/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002317 * igb_setup_mrqc - configure the multiple receive queue control registers
2318 * @adapter: Board private structure
2319 **/
2320static void igb_setup_mrqc(struct igb_adapter *adapter)
2321{
2322 struct e1000_hw *hw = &adapter->hw;
2323 u32 mrqc, rxcsum;
2324 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2325 union e1000_reta {
2326 u32 dword;
2327 u8 bytes[4];
2328 } reta;
2329 static const u8 rsshash[40] = {
2330 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2331 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2332 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2333 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2334
2335 /* Fill out hash function seeds */
2336 for (j = 0; j < 10; j++) {
2337 u32 rsskey = rsshash[(j * 4)];
2338 rsskey |= rsshash[(j * 4) + 1] << 8;
2339 rsskey |= rsshash[(j * 4) + 2] << 16;
2340 rsskey |= rsshash[(j * 4) + 3] << 24;
2341 array_wr32(E1000_RSSRK(0), j, rsskey);
2342 }
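	/*
	 * Packing example for the loop above: the first four seed bytes
	 * 0x6d, 0x5a, 0x56, 0xda yield RSSRK(0) = 0xda565a6d, with byte
	 * 0 in the least significant position.
	 */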
2343
Alexander Duycka99955f2009-11-12 18:37:19 +00002344 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002345
2346 if (adapter->vfs_allocated_count) {
2347 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2348 switch (hw->mac.type) {
Alexander Duyck55cac242009-11-19 12:42:21 +00002349 case e1000_82580:
2350 num_rx_queues = 1;
2351 shift = 0;
2352 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002353 case e1000_82576:
2354 shift = 3;
2355 num_rx_queues = 2;
2356 break;
2357 case e1000_82575:
2358 shift = 2;
2359 shift2 = 6;
2360 default:
2361 break;
2362 }
2363 } else {
2364 if (hw->mac.type == e1000_82575)
2365 shift = 6;
2366 }
2367
2368 for (j = 0; j < (32 * 4); j++) {
2369 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2370 if (shift2)
2371 reta.bytes[j & 3] |= num_rx_queues << shift2;
2372 if ((j & 3) == 3)
2373 wr32(E1000_RETA(j >> 2), reta.dword);
2374 }
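	/*
	 * Sketch of the table built above (assuming an 82576 PF with no
	 * VFs and rss_queues = 4, so shift = 0): the redirection bytes
	 * cycle 0, 1, 2, 3, spreading the 128 hash buckets evenly
	 * across the four queues.
	 */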
2375
2376 /*
2377 * Disable raw packet checksumming so that RSS hash is placed in
2378 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2379 * offloads as they are enabled by default
2380 */
2381 rxcsum = rd32(E1000_RXCSUM);
2382 rxcsum |= E1000_RXCSUM_PCSD;
2383
2384 if (adapter->hw.mac.type >= e1000_82576)
2385 /* Enable Receive Checksum Offload for SCTP */
2386 rxcsum |= E1000_RXCSUM_CRCOFL;
2387
2388 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2389 wr32(E1000_RXCSUM, rxcsum);
2390
2391 /* If VMDq is enabled then we set the appropriate mode for that, else
2392 * we default to RSS so that an RSS hash is calculated per packet even
2393 * if we are only using one queue */
2394 if (adapter->vfs_allocated_count) {
2395 if (hw->mac.type > e1000_82575) {
2396 /* Set the default pool for the PF's first queue */
2397 u32 vtctl = rd32(E1000_VT_CTL);
2398 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2399 E1000_VT_CTL_DISABLE_DEF_POOL);
2400 vtctl |= adapter->vfs_allocated_count <<
2401 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2402 wr32(E1000_VT_CTL, vtctl);
2403 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002404 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002405 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2406 else
2407 mrqc = E1000_MRQC_ENABLE_VMDQ;
2408 } else {
2409 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2410 }
2411 igb_vmm_control(adapter);
2412
2413 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2414 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2415 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2416 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2417 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2418 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2419 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2420 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2421
2422 wr32(E1000_MRQC, mrqc);
2423}
2424
2425/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002426 * igb_setup_rctl - configure the receive control registers
2427 * @adapter: Board private structure
2428 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002429void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002430{
2431 struct e1000_hw *hw = &adapter->hw;
2432 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002433
2434 rctl = rd32(E1000_RCTL);
2435
2436 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002437 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002438
Alexander Duyck69d728b2008-11-25 01:04:03 -08002439 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002440 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002441
Auke Kok87cb7e82008-07-08 15:08:29 -07002442 /*
2443 * enable stripping of CRC. It's unlikely this will break BMC
2444 * redirection as it did with e1000. Newer features require
2445 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002446 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002447 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002448
Alexander Duyck559e9c42009-10-27 23:52:50 +00002449 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002450 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002451
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002452 /* enable LPE to prevent packets larger than max_frame_size */
2453 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002454
Alexander Duyck952f72a2009-10-27 15:51:07 +00002455 /* disable queue 0 to prevent tail write w/o re-config */
2456 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002457
Alexander Duycke1739522009-02-19 20:39:44 -08002458 /* Attention! For SR-IOV PF driver operations you must enable
2459 * queue drop for all VF and PF queues to prevent head-of-line blocking
2460 * if an untrusted VF does not provide descriptors to hardware.
2461 */
2462 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002463 /* set all queue drop enable bits */
2464 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002465 }
2466
Auke Kok9d5c8242008-01-24 02:22:38 -08002467 wr32(E1000_RCTL, rctl);
2468}
2469
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002470static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2471 int vfn)
2472{
2473 struct e1000_hw *hw = &adapter->hw;
2474 u32 vmolr;
2475
2476 /* if it isn't the PF, check to see if VFs are enabled and
2477 * increase the size to support VLAN tags */
2478 if (vfn < adapter->vfs_allocated_count &&
2479 adapter->vf_data[vfn].vlans_enabled)
2480 size += VLAN_TAG_SIZE;
2481
2482 vmolr = rd32(E1000_VMOLR(vfn));
2483 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2484 vmolr |= size | E1000_VMOLR_LPE;
2485 wr32(E1000_VMOLR(vfn), vmolr);
2486
2487 return 0;
2488}
2489
Auke Kok9d5c8242008-01-24 02:22:38 -08002490/**
Alexander Duycke1739522009-02-19 20:39:44 -08002491 * igb_rlpml_set - set maximum receive packet size
2492 * @adapter: board private structure
2493 *
2494 * Configure maximum receivable packet size.
2495 **/
2496static void igb_rlpml_set(struct igb_adapter *adapter)
2497{
2498 u32 max_frame_size = adapter->max_frame_size;
2499 struct e1000_hw *hw = &adapter->hw;
2500 u16 pf_id = adapter->vfs_allocated_count;
2501
2502 if (adapter->vlgrp)
2503 max_frame_size += VLAN_TAG_SIZE;
2504
2505 /* if vfs are enabled we set RLPML to the largest possible request
2506 * size and set the VMOLR RLPML to the size we need */
2507 if (pf_id) {
2508 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002509 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002510 }
2511
2512 wr32(E1000_RLPML, max_frame_size);
2513}
2514
Williams, Mitch A8151d292010-02-10 01:44:24 +00002515static inline void igb_set_vmolr(struct igb_adapter *adapter,
2516 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002517{
2518 struct e1000_hw *hw = &adapter->hw;
2519 u32 vmolr;
2520
2521 /*
2522 * This register exists only on 82576 and newer, so if we are older
2523 * we should exit and do nothing
2524 */
2525 if (hw->mac.type < e1000_82576)
2526 return;
2527
2528 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00002529 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2530 if (aupe)
2531 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2532 else
2533 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002534
2535 /* clear all bits that might not be set */
2536 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2537
Alexander Duycka99955f2009-11-12 18:37:19 +00002538 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002539 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2540 /*
2541 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2542 * multicast packets
2543 */
2544 if (vfn <= adapter->vfs_allocated_count)
2545 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2546
2547 wr32(E1000_VMOLR(vfn), vmolr);
2548}
2549
Alexander Duycke1739522009-02-19 20:39:44 -08002550/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002551 * igb_configure_rx_ring - Configure a receive ring after Reset
2552 * @adapter: board private structure
2553 * @ring: receive ring to be configured
2554 *
2555 * Configure the Rx unit of the MAC after a reset.
2556 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002557void igb_configure_rx_ring(struct igb_adapter *adapter,
2558 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002559{
2560 struct e1000_hw *hw = &adapter->hw;
2561 u64 rdba = ring->dma;
2562 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002563 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002564
2565 /* disable the queue */
2566 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2567 wr32(E1000_RXDCTL(reg_idx),
2568 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2569
2570 /* Set DMA base address registers */
2571 wr32(E1000_RDBAL(reg_idx),
2572 rdba & 0x00000000ffffffffULL);
2573 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2574 wr32(E1000_RDLEN(reg_idx),
2575 ring->count * sizeof(union e1000_adv_rx_desc));
2576
2577 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002578 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2579 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2580 writel(0, ring->head);
2581 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002582
Alexander Duyck952f72a2009-10-27 15:51:07 +00002583 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002584 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2585 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002586 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2587#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2588 srrctl |= IGB_RXBUFFER_16384 >>
2589 E1000_SRRCTL_BSIZEPKT_SHIFT;
2590#else
2591 srrctl |= (PAGE_SIZE / 2) >>
2592 E1000_SRRCTL_BSIZEPKT_SHIFT;
2593#endif
2594 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2595 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002596 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002597 E1000_SRRCTL_BSIZEPKT_SHIFT;
2598 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2599 }
2600
2601 wr32(E1000_SRRCTL(reg_idx), srrctl);
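	/*
	 * e.g. with the usual 2048-byte buffer length (an assumption)
	 * this takes the one-buffer branch and programs BSIZEPKT = 2,
	 * i.e. 2 KB packet buffers in 1 KB granularity.
	 */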
2602
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002603 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00002604 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002605
Alexander Duyck85b430b2009-10-27 15:50:29 +00002606 /* enable receive descriptor fetching */
2607 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2608 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2609 rxdctl &= 0xFFF00000;
2610 rxdctl |= IGB_RX_PTHRESH;
2611 rxdctl |= IGB_RX_HTHRESH << 8;
2612 rxdctl |= IGB_RX_WTHRESH << 16;
2613 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2614}
2615
2616/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002617 * igb_configure_rx - Configure receive Unit after Reset
2618 * @adapter: board private structure
2619 *
2620 * Configure the Rx unit of the MAC after a reset.
2621 **/
2622static void igb_configure_rx(struct igb_adapter *adapter)
2623{
Hannes Eder91075842009-02-18 19:36:04 -08002624 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002625
Alexander Duyck68d480c2009-10-05 06:33:08 +00002626 /* set UTA to appropriate mode */
2627 igb_set_uta(adapter);
2628
Alexander Duyck26ad9172009-10-05 06:32:49 +00002629 /* set the correct pool for the PF default MAC address in entry 0 */
2630 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2631 adapter->vfs_allocated_count);
2632
Alexander Duyck06cf2662009-10-27 15:53:25 +00002633 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2634 * the Base and Length of the Rx Descriptor Ring */
2635 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002636 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002637}
2638
2639/**
2640 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002641 * @tx_ring: Tx descriptor ring for a specific queue
2642 *
2643 * Free all transmit software resources
2644 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002645void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002646{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002647 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002648
2649 vfree(tx_ring->buffer_info);
2650 tx_ring->buffer_info = NULL;
2651
Alexander Duyck439705e2009-10-27 23:49:20 +00002652 /* if not set, then don't free */
2653 if (!tx_ring->desc)
2654 return;
2655
Alexander Duyck80785292009-10-27 15:51:47 +00002656 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2657 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002658
2659 tx_ring->desc = NULL;
2660}
2661
2662/**
2663 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2664 * @adapter: board private structure
2665 *
2666 * Free all transmit software resources
2667 **/
2668static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2669{
2670 int i;
2671
2672 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002673 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002674}
2675
Alexander Duyckb1a436c2009-10-27 15:54:43 +00002676void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2677 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002678{
Alexander Duyck6366ad32009-12-02 16:47:18 +00002679 if (buffer_info->dma) {
2680 if (buffer_info->mapped_as_page)
2681 pci_unmap_page(tx_ring->pdev,
2682 buffer_info->dma,
2683 buffer_info->length,
2684 PCI_DMA_TODEVICE);
2685 else
2686 pci_unmap_single(tx_ring->pdev,
2687 buffer_info->dma,
2688 buffer_info->length,
2689 PCI_DMA_TODEVICE);
2690 buffer_info->dma = 0;
2691 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002692 if (buffer_info->skb) {
2693 dev_kfree_skb_any(buffer_info->skb);
2694 buffer_info->skb = NULL;
2695 }
2696 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00002697 buffer_info->length = 0;
2698 buffer_info->next_to_watch = 0;
2699 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002700}
2701
2702/**
2703 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08002704 * @tx_ring: ring to be cleaned
2705 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002706static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002707{
2708 struct igb_buffer *buffer_info;
2709 unsigned long size;
2710 unsigned int i;
2711
2712 if (!tx_ring->buffer_info)
2713 return;
2714 /* Free all the Tx ring sk_buffs */
2715
2716 for (i = 0; i < tx_ring->count; i++) {
2717 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00002718 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08002719 }
2720
2721 size = sizeof(struct igb_buffer) * tx_ring->count;
2722 memset(tx_ring->buffer_info, 0, size);
2723
2724 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08002725 memset(tx_ring->desc, 0, tx_ring->size);
2726
2727 tx_ring->next_to_use = 0;
2728 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002729}
2730
2731/**
2732 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2733 * @adapter: board private structure
2734 **/
2735static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2736{
2737 int i;
2738
2739 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002740 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002741}
2742
2743/**
2744 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08002745 * @rx_ring: ring to clean the resources from
2746 *
2747 * Free all receive software resources
2748 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08002749void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002750{
Mitch Williams3b644cf2008-06-27 10:59:48 -07002751 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08002752
2753 vfree(rx_ring->buffer_info);
2754 rx_ring->buffer_info = NULL;
2755
Alexander Duyck439705e2009-10-27 23:49:20 +00002756 /* if not set, then don't free */
2757 if (!rx_ring->desc)
2758 return;
2759
Alexander Duyck80785292009-10-27 15:51:47 +00002760 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2761 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08002762
2763 rx_ring->desc = NULL;
2764}
2765
2766/**
2767 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2768 * @adapter: board private structure
2769 *
2770 * Free all receive software resources
2771 **/
2772static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2773{
2774 int i;
2775
2776 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002777 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002778}
2779
2780/**
2781 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08002782 * @rx_ring: ring to free buffers from
2783 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07002784static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002785{
2786 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08002787 unsigned long size;
2788 unsigned int i;
2789
2790 if (!rx_ring->buffer_info)
2791 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00002792
Auke Kok9d5c8242008-01-24 02:22:38 -08002793 /* Free all the Rx ring sk_buffs */
2794 for (i = 0; i < rx_ring->count; i++) {
2795 buffer_info = &rx_ring->buffer_info[i];
2796 if (buffer_info->dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002797 pci_unmap_single(rx_ring->pdev,
2798 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00002799 rx_ring->rx_buffer_len,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002800 PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002801 buffer_info->dma = 0;
2802 }
2803
2804 if (buffer_info->skb) {
2805 dev_kfree_skb(buffer_info->skb);
2806 buffer_info->skb = NULL;
2807 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002808 if (buffer_info->page_dma) {
Alexander Duyck80785292009-10-27 15:51:47 +00002809 pci_unmap_page(rx_ring->pdev,
2810 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002811 PAGE_SIZE / 2,
2812 PCI_DMA_FROMDEVICE);
2813 buffer_info->page_dma = 0;
2814 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002815 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002816 put_page(buffer_info->page);
2817 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07002818 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002819 }
2820 }
2821
Auke Kok9d5c8242008-01-24 02:22:38 -08002822 size = sizeof(struct igb_buffer) * rx_ring->count;
2823 memset(rx_ring->buffer_info, 0, size);
2824
2825 /* Zero out the descriptor ring */
2826 memset(rx_ring->desc, 0, rx_ring->size);
2827
2828 rx_ring->next_to_clean = 0;
2829 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002830}
2831
2832/**
2833 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2834 * @adapter: board private structure
2835 **/
2836static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2837{
2838 int i;
2839
2840 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002841 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002842}
2843
2844/**
2845 * igb_set_mac - Change the Ethernet Address of the NIC
2846 * @netdev: network interface device structure
2847 * @p: pointer to an address structure
2848 *
2849 * Returns 0 on success, negative on failure
2850 **/
2851static int igb_set_mac(struct net_device *netdev, void *p)
2852{
2853 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00002854 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002855 struct sockaddr *addr = p;
2856
2857 if (!is_valid_ether_addr(addr->sa_data))
2858 return -EADDRNOTAVAIL;
2859
2860 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00002861 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002862
Alexander Duyck26ad9172009-10-05 06:32:49 +00002863 /* set the correct pool for the new PF MAC address in entry 0 */
2864 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2865 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08002866
Auke Kok9d5c8242008-01-24 02:22:38 -08002867 return 0;
2868}
2869
2870/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00002871 * igb_write_mc_addr_list - write multicast addresses to MTA
2872 * @netdev: network interface device structure
2873 *
2874 * Writes multicast address list to the MTA hash table.
2875 * Returns: -ENOMEM on failure
2876 * 0 on no addresses written
2877 * X on writing X addresses to MTA
2878 **/
2879static int igb_write_mc_addr_list(struct net_device *netdev)
2880{
2881 struct igb_adapter *adapter = netdev_priv(netdev);
2882 struct e1000_hw *hw = &adapter->hw;
2883 struct dev_mc_list *mc_ptr = netdev->mc_list;
2884 u8 *mta_list;
2885 u32 vmolr = 0;
2886 int i;
2887
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002888 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002889 /* nothing to program, so clear mc list */
2890 igb_update_mc_addr_list(hw, NULL, 0);
2891 igb_restore_vf_multicasts(adapter);
2892 return 0;
2893 }
2894
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002895 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002896 if (!mta_list)
2897 return -ENOMEM;
2898
2899 /* set vmolr receive overflow multicast bit */
2900 vmolr |= E1000_VMOLR_ROMPE;
2901
2902 /* The shared function expects a packed array of only addresses. */
2903 mc_ptr = netdev->mc_list;
2904
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002905 for (i = 0; i < netdev_mc_count(netdev); i++) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002906 if (!mc_ptr)
2907 break;
2908 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2909 mc_ptr = mc_ptr->next;
2910 }
2911 igb_update_mc_addr_list(hw, mta_list, i);
2912 kfree(mta_list);
2913
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002914 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002915}
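/* Note: mta_list is a packed array of 6-byte (ETH_ALEN) entries with no
 * padding, so a hypothetical three-address list occupies bytes 0-17;
 * igb_update_mc_addr_list() then hashes each entry into the MTA.
 */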
2916
2917/**
2918 * igb_write_uc_addr_list - write unicast addresses to RAR table
2919 * @netdev: network interface device structure
2920 *
2921 * Writes unicast address list to the RAR table.
2922 * Returns: -ENOMEM on failure/insufficient address space
2923 * 0 on no addresses written
2924 * X on writing X addresses to the RAR table
2925 **/
2926static int igb_write_uc_addr_list(struct net_device *netdev)
2927{
2928 struct igb_adapter *adapter = netdev_priv(netdev);
2929 struct e1000_hw *hw = &adapter->hw;
2930 unsigned int vfn = adapter->vfs_allocated_count;
2931 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2932 int count = 0;
2933
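	/* For example, on a hypothetical part with 24 receive-address
	 * registers and 7 VFs allocated, (vfn + 1) = 8 entries remain
	 * reserved for the PF default MAC and the VF addresses, leaving
	 * rar_entries = 16 for the unicast filters written below.
	 */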
2934 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002935 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00002936 return -ENOMEM;
2937
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002938 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002939 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002940
2941 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002942 if (!rar_entries)
2943 break;
2944 igb_rar_set_qsel(adapter, ha->addr,
2945 rar_entries--,
2946 vfn);
2947 count++;
2948 }
2949 }
2950 /* write the addresses in reverse order to avoid write combining */
2951 for (; rar_entries > 0 ; rar_entries--) {
2952 wr32(E1000_RAH(rar_entries), 0);
2953 wr32(E1000_RAL(rar_entries), 0);
2954 }
2955 wrfl();
2956
2957 return count;
2958}
2959
2960/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002961 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08002962 * @netdev: network interface device structure
2963 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002964 * The set_rx_mode entry point is called whenever the unicast or multicast
2965 * address lists or the network interface flags are updated. This routine is
2966 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08002967 * promiscuous mode, and all-multi behavior.
2968 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002969static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002970{
2971 struct igb_adapter *adapter = netdev_priv(netdev);
2972 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002973 unsigned int vfn = adapter->vfs_allocated_count;
2974 u32 rctl, vmolr = 0;
2975 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08002976
2977 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08002978 rctl = rd32(E1000_RCTL);
2979
Alexander Duyck68d480c2009-10-05 06:33:08 +00002980	/* clear the affected bits */
2981 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2982
Patrick McHardy746b9f02008-07-16 20:15:45 -07002983 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002984 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00002985 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002986 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00002987 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002988 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00002989 vmolr |= E1000_VMOLR_MPME;
2990 } else {
2991 /*
2992			 * Write addresses to the MTA; if the attempt fails,
2993			 * then we should just turn on promiscuous mode so
2994 * that we can at least receive multicast traffic
2995 */
2996 count = igb_write_mc_addr_list(netdev);
2997 if (count < 0) {
2998 rctl |= E1000_RCTL_MPE;
2999 vmolr |= E1000_VMOLR_MPME;
3000 } else if (count) {
3001 vmolr |= E1000_VMOLR_ROMPE;
3002 }
3003 }
3004 /*
3005		 * Write addresses to available RAR registers; if there is not
3006		 * sufficient space to store all the addresses, then enable
3007		 * unicast promiscuous mode
3008 */
3009 count = igb_write_uc_addr_list(netdev);
3010 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003011 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003012 vmolr |= E1000_VMOLR_ROPE;
3013 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003014 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003015 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003016 wr32(E1000_RCTL, rctl);
3017
Alexander Duyck68d480c2009-10-05 06:33:08 +00003018 /*
3019 * In order to support SR-IOV and eventually VMDq it is necessary to set
3020 * the VMOLR to enable the appropriate modes. Without this workaround
3021 * we will have issues with VLAN tag stripping not being done for frames
3022	 * that arrive only because we are the default pool
3023 */
3024 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003025 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003026
Alexander Duyck68d480c2009-10-05 06:33:08 +00003027 vmolr |= rd32(E1000_VMOLR(vfn)) &
3028 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3029 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003030 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003031}
3032
3033/* Need to wait a few seconds after link up to get diagnostic information from
3034 * the phy */
3035static void igb_update_phy_info(unsigned long data)
3036{
3037 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003038 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003039}
3040
3041/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003042 * igb_has_link - check shared code for link and determine up/down
3043 * @adapter: pointer to driver private info
3044 **/
Nick Nunley31455352010-02-17 01:01:21 +00003045bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003046{
3047 struct e1000_hw *hw = &adapter->hw;
3048 bool link_active = false;
3049 s32 ret_val = 0;
3050
3051 /* get_link_status is set on LSC (link status) interrupt or
3052 * rx sequence error interrupt. get_link_status will stay
3053 * false until the e1000_check_for_link establishes link
3054 * for copper adapters ONLY
3055 */
3056 switch (hw->phy.media_type) {
3057 case e1000_media_type_copper:
3058 if (hw->mac.get_link_status) {
3059 ret_val = hw->mac.ops.check_for_link(hw);
3060 link_active = !hw->mac.get_link_status;
3061 } else {
3062 link_active = true;
3063 }
3064 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003065 case e1000_media_type_internal_serdes:
3066 ret_val = hw->mac.ops.check_for_link(hw);
3067 link_active = hw->mac.serdes_has_link;
3068 break;
3069 default:
3070 case e1000_media_type_unknown:
3071 break;
3072 }
3073
3074 return link_active;
3075}
3076
3077/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003078 * igb_watchdog - Timer Call-back
3079 * @data: pointer to adapter cast into an unsigned long
3080 **/
3081static void igb_watchdog(unsigned long data)
3082{
3083 struct igb_adapter *adapter = (struct igb_adapter *)data;
3084 /* Do the rest outside of interrupt context */
3085 schedule_work(&adapter->watchdog_task);
3086}
3087
3088static void igb_watchdog_task(struct work_struct *work)
3089{
3090 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003091 struct igb_adapter,
3092 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003093 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003094 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003095 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003096 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003097
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003098 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003099 if (link) {
3100 if (!netif_carrier_ok(netdev)) {
3101 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003102 hw->mac.ops.get_speed_and_duplex(hw,
3103 &adapter->link_speed,
3104 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003105
3106 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003107 /* Links status message must follow this format */
3108 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003109 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003110 netdev->name,
3111 adapter->link_speed,
3112 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003113 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003114 ((ctrl & E1000_CTRL_TFCE) &&
3115 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3116 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3117 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003118
3119 /* tweak tx_queue_len according to speed/duplex and
3120 * adjust the timeout factor */
3121 netdev->tx_queue_len = adapter->tx_queue_len;
3122 adapter->tx_timeout_factor = 1;
3123 switch (adapter->link_speed) {
3124 case SPEED_10:
3125 netdev->tx_queue_len = 10;
3126 adapter->tx_timeout_factor = 14;
3127 break;
3128 case SPEED_100:
3129 netdev->tx_queue_len = 100;
3130 /* maybe add some timeout factor ? */
3131 break;
3132 }
3133
3134 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003135
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003136 igb_ping_all_vfs(adapter);
3137
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003138 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003139 if (!test_bit(__IGB_DOWN, &adapter->state))
3140 mod_timer(&adapter->phy_info_timer,
3141 round_jiffies(jiffies + 2 * HZ));
3142 }
3143 } else {
3144 if (netif_carrier_ok(netdev)) {
3145 adapter->link_speed = 0;
3146 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003147 /* Links status message must follow this format */
3148 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3149 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003150 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003151
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003152 igb_ping_all_vfs(adapter);
3153
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003154 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003155 if (!test_bit(__IGB_DOWN, &adapter->state))
3156 mod_timer(&adapter->phy_info_timer,
3157 round_jiffies(jiffies + 2 * HZ));
3158 }
3159 }
3160
Auke Kok9d5c8242008-01-24 02:22:38 -08003161 igb_update_stats(adapter);
Alexander Duyck645a3ab2009-10-27 23:50:18 +00003162 igb_update_adaptive(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003163
Alexander Duyckdbabb062009-11-12 18:38:16 +00003164 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003165 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003166 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003167 /* We've lost link, so the controller stops DMA,
3168 * but we've got queued Tx work that's never going
3169 * to get done, so reset controller to flush Tx.
3170 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003171 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3172 adapter->tx_timeout_count++;
3173 schedule_work(&adapter->reset_task);
3174 /* return immediately since reset is imminent */
3175 return;
3176 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003177 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003178
Alexander Duyckdbabb062009-11-12 18:38:16 +00003179 /* Force detection of hung controller every watchdog period */
3180 tx_ring->detect_tx_hung = true;
3181 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003182
Auke Kok9d5c8242008-01-24 02:22:38 -08003183 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003184 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003185 u32 eics = 0;
3186 for (i = 0; i < adapter->num_q_vectors; i++) {
3187 struct igb_q_vector *q_vector = adapter->q_vector[i];
3188 eics |= q_vector->eims_value;
3189 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003190 wr32(E1000_EICS, eics);
3191 } else {
3192 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3193 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003194
Auke Kok9d5c8242008-01-24 02:22:38 -08003195 /* Reset the timer */
3196 if (!test_bit(__IGB_DOWN, &adapter->state))
3197 mod_timer(&adapter->watchdog_timer,
3198 round_jiffies(jiffies + 2 * HZ));
3199}
3200
3201enum latency_range {
3202 lowest_latency = 0,
3203 low_latency = 1,
3204 bulk_latency = 2,
3205 latency_invalid = 255
3206};
3207
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003208/**
3209 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3210 *
3211 * Stores a new ITR value based strictly on packet size. This
3212 * algorithm is less sophisticated than that used in igb_update_itr,
3213 * due to the difficulty of synchronizing statistics across multiple
3214 * receive rings. The divisors and thresholds used by this function
3215 * were determined based on theoretical maximum wire speed and testing
3216 * data, in order to minimize response time while increasing bulk
3217 * throughput.
3218 * This functionality is controlled by the InterruptThrottleRate module
3219 * parameter (see igb_param.c)
3220 * NOTE: This function is called only when operating in a multiqueue
3221 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003222 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003223 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003224static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003225{
Alexander Duyck047e0032009-10-27 15:49:27 +00003226 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003227 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003228 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -08003229
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003230 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3231	 * ints/sec - an ITR register value of 976 ticks.
3232 */
3233 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003234 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003235 goto set_itr_val;
3236 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003237
3238 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3239 struct igb_ring *ring = q_vector->rx_ring;
3240 avg_wire_size = ring->total_bytes / ring->total_packets;
3241 }
3242
3243 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3244 struct igb_ring *ring = q_vector->tx_ring;
3245 avg_wire_size = max_t(u32, avg_wire_size,
3246 (ring->total_bytes /
3247 ring->total_packets));
3248 }
3249
3250 /* if avg_wire_size isn't set no work was done */
3251 if (!avg_wire_size)
3252 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003253
3254 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3255 avg_wire_size += 24;
3256
3257 /* Don't starve jumbo frames */
3258 avg_wire_size = min(avg_wire_size, 3000);
3259
3260 /* Give a little boost to mid-size frames */
3261 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3262 new_val = avg_wire_size / 3;
3263 else
3264 new_val = avg_wire_size / 2;
3265
3266set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003267 if (new_val != q_vector->itr_val) {
3268 q_vector->itr_val = new_val;
3269 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003270 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003271clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003272 if (q_vector->rx_ring) {
3273 q_vector->rx_ring->total_bytes = 0;
3274 q_vector->rx_ring->total_packets = 0;
3275 }
3276 if (q_vector->tx_ring) {
3277 q_vector->tx_ring->total_bytes = 0;
3278 q_vector->tx_ring->total_packets = 0;
3279 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003280}
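/* Worked example with hypothetical interval counters: 60 packets totalling
 * 90000 bytes gives avg_wire_size = 1500, plus 24 bytes of overhead = 1524;
 * that is under the 3000-byte jumbo cap and outside the 300-1200 boost
 * window, so the new ITR register value becomes 1524 / 2 = 762.
 */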
3281
3282/**
3283 * igb_update_itr - update the dynamic ITR value based on statistics
3284 * Stores a new ITR value based on packet and byte
3285 * counts during the last interrupt. The advantage of per interrupt
3286 * computation is faster updates and more accurate ITR for the current
3287 * traffic pattern. Constants in this function were computed
3288 * based on theoretical maximum wire speed and thresholds were set based
3289 * on testing data as well as attempting to minimize response time
3290 * while increasing bulk throughput.
3291 * This functionality is controlled by the InterruptThrottleRate module
3292 * parameter (see igb_param.c)
3293 * NOTE: These calculations are only valid when operating in a single-
3294 * queue environment.
3295 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003296 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003297 * @packets: the number of packets during this measurement interval
3298 * @bytes: the number of bytes during this measurement interval
3299 **/
3300static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3301 int packets, int bytes)
3302{
3303 unsigned int retval = itr_setting;
3304
3305 if (packets == 0)
3306 goto update_itr_done;
3307
3308 switch (itr_setting) {
3309 case lowest_latency:
3310 /* handle TSO and jumbo frames */
3311 if (bytes/packets > 8000)
3312 retval = bulk_latency;
3313 else if ((packets < 5) && (bytes > 512))
3314 retval = low_latency;
3315 break;
3316 case low_latency: /* 50 usec aka 20000 ints/s */
3317 if (bytes > 10000) {
3318 /* this if handles the TSO accounting */
3319 if (bytes/packets > 8000) {
3320 retval = bulk_latency;
3321 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3322 retval = bulk_latency;
3323 } else if ((packets > 35)) {
3324 retval = lowest_latency;
3325 }
3326 } else if (bytes/packets > 2000) {
3327 retval = bulk_latency;
3328 } else if (packets <= 2 && bytes < 512) {
3329 retval = lowest_latency;
3330 }
3331 break;
3332 case bulk_latency: /* 250 usec aka 4000 ints/s */
3333 if (bytes > 25000) {
3334 if (packets > 35)
3335 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003336 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003337 retval = low_latency;
3338 }
3339 break;
3340 }
3341
3342update_itr_done:
3343 return retval;
3344}
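/* Worked example with hypothetical counters: at low_latency with 40 packets
 * and 48000 bytes, bytes > 10000 and bytes/packets = 1200, which matches
 * neither the TSO check (> 8000) nor the bulk checks, but packets > 35, so
 * igb_update_itr() returns lowest_latency.
 */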
3345
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003346static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003347{
Alexander Duyck047e0032009-10-27 15:49:27 +00003348 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003349 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003350 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003351
3352 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3353 if (adapter->link_speed != SPEED_1000) {
3354 current_itr = 0;
3355 new_itr = 4000;
3356 goto set_itr_now;
3357 }
3358
3359 adapter->rx_itr = igb_update_itr(adapter,
3360 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003361 q_vector->rx_ring->total_packets,
3362 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003363
Alexander Duyck047e0032009-10-27 15:49:27 +00003364 adapter->tx_itr = igb_update_itr(adapter,
3365 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003366 q_vector->tx_ring->total_packets,
3367 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003368 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003369
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003370 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003371 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003372 current_itr = low_latency;
3373
Auke Kok9d5c8242008-01-24 02:22:38 -08003374 switch (current_itr) {
3375 /* counts and packets in update_itr are dependent on these numbers */
3376 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003377 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003378 break;
3379 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003380 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003381 break;
3382 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003383 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003384 break;
3385 default:
3386 break;
3387 }
3388
3389set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003390 q_vector->rx_ring->total_bytes = 0;
3391 q_vector->rx_ring->total_packets = 0;
3392 q_vector->tx_ring->total_bytes = 0;
3393 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003394
Alexander Duyck047e0032009-10-27 15:49:27 +00003395 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003396 /* this attempts to bias the interrupt rate towards Bulk
3397 * by adding intermediate steps when interrupt rate is
3398 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003399 new_itr = new_itr > q_vector->itr_val ?
3400 max((new_itr * q_vector->itr_val) /
3401 (new_itr + (q_vector->itr_val >> 2)),
3402 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003403 new_itr;
3404 /* Don't write the value here; it resets the adapter's
3405 * internal timer, and causes us to delay far longer than
3406 * we should between interrupts. Instead, we write the ITR
3407 * value at the beginning of the next interrupt so the timing
3408 * ends up being correct.
3409 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003410 q_vector->itr_val = new_itr;
3411 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003412 }
3413
3414 return;
3415}
3416
Auke Kok9d5c8242008-01-24 02:22:38 -08003417#define IGB_TX_FLAGS_CSUM 0x00000001
3418#define IGB_TX_FLAGS_VLAN 0x00000002
3419#define IGB_TX_FLAGS_TSO 0x00000004
3420#define IGB_TX_FLAGS_IPV4 0x00000008
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003421#define IGB_TX_FLAGS_TSTAMP 0x00000010
3422#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3423#define IGB_TX_FLAGS_VLAN_SHIFT 16
Auke Kok9d5c8242008-01-24 02:22:38 -08003424
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003425static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003426 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3427{
3428 struct e1000_adv_tx_context_desc *context_desc;
3429 unsigned int i;
3430 int err;
3431 struct igb_buffer *buffer_info;
3432 u32 info = 0, tu_cmd = 0;
3433 u32 mss_l4len_idx, l4len;
3434 *hdr_len = 0;
3435
3436 if (skb_header_cloned(skb)) {
3437 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3438 if (err)
3439 return err;
3440 }
3441
3442 l4len = tcp_hdrlen(skb);
3443 *hdr_len += l4len;
3444
3445 if (skb->protocol == htons(ETH_P_IP)) {
3446 struct iphdr *iph = ip_hdr(skb);
3447 iph->tot_len = 0;
3448 iph->check = 0;
3449 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3450 iph->daddr, 0,
3451 IPPROTO_TCP,
3452 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003453 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003454 ipv6_hdr(skb)->payload_len = 0;
3455 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3456 &ipv6_hdr(skb)->daddr,
3457 0, IPPROTO_TCP, 0);
3458 }
3459
3460 i = tx_ring->next_to_use;
3461
3462 buffer_info = &tx_ring->buffer_info[i];
3463 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3464 /* VLAN MACLEN IPLEN */
3465 if (tx_flags & IGB_TX_FLAGS_VLAN)
3466 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3467 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3468 *hdr_len += skb_network_offset(skb);
3469 info |= skb_network_header_len(skb);
3470 *hdr_len += skb_network_header_len(skb);
3471 context_desc->vlan_macip_lens = cpu_to_le32(info);
3472
3473 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3474 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3475
3476 if (skb->protocol == htons(ETH_P_IP))
3477 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3478 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3479
3480 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3481
3482 /* MSS L4LEN IDX */
3483 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3484 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3485
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003486 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003487 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3488 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003489
3490 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3491 context_desc->seqnum_seed = 0;
3492
3493 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003494 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003495 buffer_info->dma = 0;
3496 i++;
3497 if (i == tx_ring->count)
3498 i = 0;
3499
3500 tx_ring->next_to_use = i;
3501
3502 return true;
3503}
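/* Example context-descriptor encoding, assuming an MSS of 1448 and a 20-byte
 * TCP header: mss_l4len_idx packs (1448 << E1000_ADVTXD_MSS_SHIFT) |
 * (20 << E1000_ADVTXD_L4LEN_SHIFT), with tx_ring->reg_idx << 4 OR-ed in on
 * rings that require a unique context index.
 */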
3504
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003505static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3506 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003507{
3508 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck80785292009-10-27 15:51:47 +00003509 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003510 struct igb_buffer *buffer_info;
3511 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003512 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003513
3514 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3515 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3516 i = tx_ring->next_to_use;
3517 buffer_info = &tx_ring->buffer_info[i];
3518 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3519
3520 if (tx_flags & IGB_TX_FLAGS_VLAN)
3521 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003522
Auke Kok9d5c8242008-01-24 02:22:38 -08003523 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3524 if (skb->ip_summed == CHECKSUM_PARTIAL)
3525 info |= skb_network_header_len(skb);
3526
3527 context_desc->vlan_macip_lens = cpu_to_le32(info);
3528
3529 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3530
3531 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003532 __be16 protocol;
3533
3534 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3535 const struct vlan_ethhdr *vhdr =
3536 (const struct vlan_ethhdr*)skb->data;
3537
3538 protocol = vhdr->h_vlan_encapsulated_proto;
3539 } else {
3540 protocol = skb->protocol;
3541 }
3542
3543 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003544 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003545 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003546 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3547 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003548 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3549 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003550 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003551 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003552 /* XXX what about other V6 headers?? */
3553 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3554 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003555 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3556 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003557 break;
3558 default:
3559 if (unlikely(net_ratelimit()))
Alexander Duyck80785292009-10-27 15:51:47 +00003560 dev_warn(&pdev->dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003561 "partial checksum but proto=%x!\n",
3562 skb->protocol);
3563 break;
3564 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003565 }
3566
3567 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3568 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003569 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003570 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003571 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003572
3573 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003574 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003575 buffer_info->dma = 0;
3576
3577 i++;
3578 if (i == tx_ring->count)
3579 i = 0;
3580 tx_ring->next_to_use = i;
3581
3582 return true;
3583 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003584 return false;
3585}
3586
3587#define IGB_MAX_TXD_PWR 16
3588#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3589
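/* igb_tx_map_adv() DMA-maps skb->data plus each page fragment and records
 * the mappings in buffer_info; it returns the number of descriptors
 * consumed (1 + nr_frags), or 0 after unwinding when a mapping fails.
 */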
Alexander Duyck80785292009-10-27 15:51:47 +00003590static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003591 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003592{
3593 struct igb_buffer *buffer_info;
Alexander Duyck80785292009-10-27 15:51:47 +00003594 struct pci_dev *pdev = tx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003595 unsigned int len = skb_headlen(skb);
3596 unsigned int count = 0, i;
3597 unsigned int f;
3598
3599 i = tx_ring->next_to_use;
3600
3601 buffer_info = &tx_ring->buffer_info[i];
3602 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3603 buffer_info->length = len;
3604 /* set time_stamp *before* dma to help avoid a possible race */
3605 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003606 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003607 buffer_info->dma = pci_map_single(pdev, skb->data, len,
3608 PCI_DMA_TODEVICE);
3609 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3610 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08003611
3612 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3613 struct skb_frag_struct *frag;
3614
Alexander Duyck85811452010-01-23 01:35:00 -08003615 count++;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003616 i++;
3617 if (i == tx_ring->count)
3618 i = 0;
3619
Auke Kok9d5c8242008-01-24 02:22:38 -08003620 frag = &skb_shinfo(skb)->frags[f];
3621 len = frag->size;
3622
3623 buffer_info = &tx_ring->buffer_info[i];
3624 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3625 buffer_info->length = len;
3626 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003627 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003628 buffer_info->mapped_as_page = true;
3629 buffer_info->dma = pci_map_page(pdev,
3630 frag->page,
3631 frag->page_offset,
3632 len,
3633 PCI_DMA_TODEVICE);
3634 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3635 goto dma_error;
3636
Auke Kok9d5c8242008-01-24 02:22:38 -08003637 }
3638
Auke Kok9d5c8242008-01-24 02:22:38 -08003639 tx_ring->buffer_info[i].skb = skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003640 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003641
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003642 return ++count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003643
3644dma_error:
3645 dev_err(&pdev->dev, "TX DMA map failed\n");
3646
3647 /* clear timestamp and dma mappings for failed buffer_info mapping */
3648 buffer_info->dma = 0;
3649 buffer_info->time_stamp = 0;
3650 buffer_info->length = 0;
3651 buffer_info->next_to_watch = 0;
3652 buffer_info->mapped_as_page = false;
3653 count--;
3654
3655 /* clear timestamp and dma mappings for remaining portion of packet */
3656 while (count >= 0) {
3657 count--;
3658 i--;
3659 if (i < 0)
3660 i += tx_ring->count;
3661 buffer_info = &tx_ring->buffer_info[i];
3662 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3663 }
3664
3665 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003666}
3667
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003668static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003669 int tx_flags, int count, u32 paylen,
3670 u8 hdr_len)
3671{
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003672 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003673 struct igb_buffer *buffer_info;
3674 u32 olinfo_status = 0, cmd_type_len;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003675 unsigned int i = tx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08003676
3677 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3678 E1000_ADVTXD_DCMD_DEXT);
3679
3680 if (tx_flags & IGB_TX_FLAGS_VLAN)
3681 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3682
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003683 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3684 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3685
Auke Kok9d5c8242008-01-24 02:22:38 -08003686 if (tx_flags & IGB_TX_FLAGS_TSO) {
3687 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3688
3689 /* insert tcp checksum */
3690 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3691
3692 /* insert ip checksum */
3693 if (tx_flags & IGB_TX_FLAGS_IPV4)
3694 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3695
3696 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3697 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3698 }
3699
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003700 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3701 (tx_flags & (IGB_TX_FLAGS_CSUM |
3702 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003703 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003704 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003705
3706 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3707
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003708 do {
Auke Kok9d5c8242008-01-24 02:22:38 -08003709 buffer_info = &tx_ring->buffer_info[i];
3710 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3711 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3712 tx_desc->read.cmd_type_len =
3713 cpu_to_le32(cmd_type_len | buffer_info->length);
3714 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003715 count--;
Auke Kok9d5c8242008-01-24 02:22:38 -08003716 i++;
3717 if (i == tx_ring->count)
3718 i = 0;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003719 } while (count > 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003720
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003721 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08003722 /* Force memory writes to complete before letting h/w
3723 * know there are new descriptors to fetch. (Only
3724 * applicable for weak-ordered memory model archs,
3725 * such as IA-64). */
3726 wmb();
3727
3728 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00003729 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08003730 /* we need this if more than one processor can write to our tail
3731	 * at a time, it synchronizes IO on IA64/Altix systems */
3732 mmiowb();
3733}
3734
Alexander Duycke694e962009-10-27 15:53:06 +00003735static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003736{
Alexander Duycke694e962009-10-27 15:53:06 +00003737 struct net_device *netdev = tx_ring->netdev;
3738
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003739 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003740
Auke Kok9d5c8242008-01-24 02:22:38 -08003741 /* Herbert's original patch had:
3742 * smp_mb__after_netif_stop_queue();
3743 * but since that doesn't exist yet, just open code it. */
3744 smp_mb();
3745
3746	/* We need to check again in case another CPU has just
3747 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00003748 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003749 return -EBUSY;
3750
3751 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003752 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00003753 tx_ring->tx_stats.restart_queue++;
Auke Kok9d5c8242008-01-24 02:22:38 -08003754 return 0;
3755}
3756
Alexander Duycke694e962009-10-27 15:53:06 +00003757static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003758{
Alexander Duyckc493ea42009-03-20 00:16:50 +00003759 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08003760 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00003761 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003762}
3763
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003764netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3765 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003766{
Alexander Duycke694e962009-10-27 15:53:06 +00003767 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003768 unsigned int first;
Auke Kok9d5c8242008-01-24 02:22:38 -08003769 unsigned int tx_flags = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003770 u8 hdr_len = 0;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003771 int tso = 0, count;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00003772 union skb_shared_tx *shtx = skb_tx(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08003773
Auke Kok9d5c8242008-01-24 02:22:38 -08003774 /* need: 1 descriptor per page,
3775 * + 2 desc gap to keep tail from touching head,
3776 * + 1 desc for skb->data,
3777 * + 1 desc for context descriptor,
3778 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00003779 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003780 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08003781 return NETDEV_TX_BUSY;
3782 }
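	/* e.g. a hypothetical skb with three page frags needs
	 * 3 + 4 = 7 free descriptors here: one per frag, one for
	 * skb->data, one context descriptor, and a two-descriptor
	 * gap between tail and head.
	 */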
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003783
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003784 if (unlikely(shtx->hardware)) {
3785 shtx->in_progress = 1;
3786 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00003787 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003788
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003789 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003790 tx_flags |= IGB_TX_FLAGS_VLAN;
3791 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3792 }
3793
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003794 if (skb->protocol == htons(ETH_P_IP))
3795 tx_flags |= IGB_TX_FLAGS_IPV4;
3796
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003797 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003798 if (skb_is_gso(skb)) {
3799 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003800
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003801 if (tso < 0) {
3802 dev_kfree_skb_any(skb);
3803 return NETDEV_TX_OK;
3804 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003805 }
3806
3807 if (tso)
3808 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003809 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00003810 (skb->ip_summed == CHECKSUM_PARTIAL))
3811 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003812
Alexander Duyck65689fe2009-03-20 00:17:43 +00003813 /*
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003814 * count reflects descriptors mapped, if 0 or less then mapping error
Alexander Duyck65689fe2009-03-20 00:17:43 +00003815	 * has occurred and we need to rewind the descriptor queue
3816 */
Alexander Duyck80785292009-10-27 15:51:47 +00003817 count = igb_tx_map_adv(tx_ring, skb, first);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003818 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00003819 dev_kfree_skb_any(skb);
3820 tx_ring->buffer_info[first].time_stamp = 0;
3821 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003822 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00003823 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003824
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003825 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3826
3827 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00003828 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003829
Auke Kok9d5c8242008-01-24 02:22:38 -08003830 return NETDEV_TX_OK;
3831}
3832
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003833static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3834 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003835{
3836 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003837 struct igb_ring *tx_ring;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003838 int r_idx = 0;
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003839
3840 if (test_bit(__IGB_DOWN, &adapter->state)) {
3841 dev_kfree_skb_any(skb);
3842 return NETDEV_TX_OK;
3843 }
3844
3845 if (skb->len <= 0) {
3846 dev_kfree_skb_any(skb);
3847 return NETDEV_TX_OK;
3848 }
3849
Alexander Duyck1bfaf072009-02-19 20:39:23 -08003850 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07003851 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08003852
3853 /* This goes back to the question of how to logically map a tx queue
3854	 * to a flow. Right now, performance suffers slightly when using
3855	 * multiple tx queues. If the stack breaks away from a
3856 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00003857 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003858}
3859
3860/**
3861 * igb_tx_timeout - Respond to a Tx Hang
3862 * @netdev: network interface device structure
3863 **/
3864static void igb_tx_timeout(struct net_device *netdev)
3865{
3866 struct igb_adapter *adapter = netdev_priv(netdev);
3867 struct e1000_hw *hw = &adapter->hw;
3868
3869 /* Do the reset outside of interrupt context */
3870 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003871
Alexander Duyck55cac242009-11-19 12:42:21 +00003872 if (hw->mac.type == e1000_82580)
3873 hw->dev_spec._82575.global_device_reset = true;
3874
Auke Kok9d5c8242008-01-24 02:22:38 -08003875 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00003876 wr32(E1000_EICS,
3877 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08003878}
3879
3880static void igb_reset_task(struct work_struct *work)
3881{
3882 struct igb_adapter *adapter;
3883 adapter = container_of(work, struct igb_adapter, reset_task);
3884
3885 igb_reinit_locked(adapter);
3886}
3887
3888/**
3889 * igb_get_stats - Get System Network Statistics
3890 * @netdev: network interface device structure
3891 *
3892 * Returns the address of the device statistics structure.
3893 * The statistics are actually updated from the timer callback.
3894 **/
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003895static struct net_device_stats *igb_get_stats(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003896{
Auke Kok9d5c8242008-01-24 02:22:38 -08003897 /* only return the current stats */
Ajit Khaparde8d24e932009-10-07 02:42:56 +00003898 return &netdev->stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08003899}
3900
3901/**
3902 * igb_change_mtu - Change the Maximum Transfer Unit
3903 * @netdev: network interface device structure
3904 * @new_mtu: new value for maximum frame size
3905 *
3906 * Returns 0 on success, negative on failure
3907 **/
3908static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3909{
3910 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00003911 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003912 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00003913 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003914
Alexander Duyckc809d222009-10-27 23:52:13 +00003915 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003916 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003917 return -EINVAL;
3918 }
3919
Auke Kok9d5c8242008-01-24 02:22:38 -08003920 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00003921 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003922 return -EINVAL;
3923 }
3924
3925 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3926 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003927
Auke Kok9d5c8242008-01-24 02:22:38 -08003928 /* igb_down has a dependency on max_frame_size */
3929 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00003930
Auke Kok9d5c8242008-01-24 02:22:38 -08003931 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3932	 * means we reserve 2 more; this pushes us to allocate from the next
3933 * larger slab size.
3934 * i.e. RXBUFFER_2048 --> size-4096 slab
3935 */
3936
Alexander Duyck7d95b712009-10-27 15:50:08 +00003937 if (max_frame <= IGB_RXBUFFER_1024)
Alexander Duyck4c844852009-10-27 15:52:07 +00003938 rx_buffer_len = IGB_RXBUFFER_1024;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003939 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
Alexander Duyck4c844852009-10-27 15:52:07 +00003940 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003941 else
Alexander Duyck4c844852009-10-27 15:52:07 +00003942 rx_buffer_len = IGB_RXBUFFER_128;
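	/* For example, the default MTU of 1500 yields max_frame = 1518
	 * (1500 + ETH_HLEN + ETH_FCS_LEN), selecting 1522-byte
	 * MAXIMUM_ETHERNET_VLAN_SIZE buffers; jumbo MTUs fall through to
	 * IGB_RXBUFFER_128 and rely on the page-based header-split path
	 * programmed in SRRCTL.
	 */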
3943
3944 if (netif_running(netdev))
3945 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003946
Alexander Duyck090b1792009-10-27 23:51:55 +00003947 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08003948 netdev->mtu, new_mtu);
3949 netdev->mtu = new_mtu;
3950
Alexander Duyck4c844852009-10-27 15:52:07 +00003951 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003952 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
Alexander Duyck4c844852009-10-27 15:52:07 +00003953
Auke Kok9d5c8242008-01-24 02:22:38 -08003954 if (netif_running(netdev))
3955 igb_up(adapter);
3956 else
3957 igb_reset(adapter);
3958
3959 clear_bit(__IGB_RESETTING, &adapter->state);
3960
3961 return 0;
3962}
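/*
 * Illustrative sketch (hypothetical helper, not called anywhere in the
 * driver): the buffer sizing rule above as a pure function. With the
 * default MTU of 1500, max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN = 1518,
 * so MAXIMUM_ETHERNET_VLAN_SIZE buffers are used; a jumbo MTU such as
 * 9000 falls through to small IGB_RXBUFFER_128 header buffers, with the
 * rest of each frame landing in half-page fragments on the rx path.
 */
static inline u32 igb_example_rx_buffer_len(int max_frame)
{
	if (max_frame <= IGB_RXBUFFER_1024)
		return IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		return MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		return IGB_RXBUFFER_128; /* header split for jumbo frames */
}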
3963
3964/**
3965 * igb_update_stats - Update the board statistics counters
3966 * @adapter: board private structure
3967 **/
3968
3969void igb_update_stats(struct igb_adapter *adapter)
3970{
Alexander Duyck128e45e2009-11-12 18:37:38 +00003971 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003972 struct e1000_hw *hw = &adapter->hw;
3973 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003974 u32 rnbc;
Auke Kok9d5c8242008-01-24 02:22:38 -08003975 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003976 int i;
3977 u64 bytes, packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003978
3979#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3980
3981 /*
3982 * Prevent stats update while adapter is being reset, or if the pci
3983 * connection is down.
3984 */
3985 if (adapter->link_speed == 0)
3986 return;
3987 if (pci_channel_offline(pdev))
3988 return;
3989
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003990 bytes = 0;
3991 packets = 0;
3992 for (i = 0; i < adapter->num_rx_queues; i++) {
3993 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00003994 struct igb_ring *ring = adapter->rx_ring[i];
3995 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00003996 net_stats->rx_fifo_errors += rqdpc_tmp;
Alexander Duyck3025a442010-02-17 01:02:39 +00003997 bytes += ring->rx_stats.bytes;
3998 packets += ring->rx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00003999 }
4000
Alexander Duyck128e45e2009-11-12 18:37:38 +00004001 net_stats->rx_bytes = bytes;
4002 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004003
4004 bytes = 0;
4005 packets = 0;
4006 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004007 struct igb_ring *ring = adapter->tx_ring[i];
4008 bytes += ring->tx_stats.bytes;
4009 packets += ring->tx_stats.packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004010 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004011 net_stats->tx_bytes = bytes;
4012 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004013
4014 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004015 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4016 adapter->stats.gprc += rd32(E1000_GPRC);
4017 adapter->stats.gorc += rd32(E1000_GORCL);
4018 rd32(E1000_GORCH); /* clear GORCL */
4019 adapter->stats.bprc += rd32(E1000_BPRC);
4020 adapter->stats.mprc += rd32(E1000_MPRC);
4021 adapter->stats.roc += rd32(E1000_ROC);
4022
4023 adapter->stats.prc64 += rd32(E1000_PRC64);
4024 adapter->stats.prc127 += rd32(E1000_PRC127);
4025 adapter->stats.prc255 += rd32(E1000_PRC255);
4026 adapter->stats.prc511 += rd32(E1000_PRC511);
4027 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4028 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4029 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4030 adapter->stats.sec += rd32(E1000_SEC);
4031
4032 adapter->stats.mpc += rd32(E1000_MPC);
4033 adapter->stats.scc += rd32(E1000_SCC);
4034 adapter->stats.ecol += rd32(E1000_ECOL);
4035 adapter->stats.mcc += rd32(E1000_MCC);
4036 adapter->stats.latecol += rd32(E1000_LATECOL);
4037 adapter->stats.dc += rd32(E1000_DC);
4038 adapter->stats.rlec += rd32(E1000_RLEC);
4039 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4040 adapter->stats.xontxc += rd32(E1000_XONTXC);
4041 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4042 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4043 adapter->stats.fcruc += rd32(E1000_FCRUC);
4044 adapter->stats.gptc += rd32(E1000_GPTC);
4045 adapter->stats.gotc += rd32(E1000_GOTCL);
4046 rd32(E1000_GOTCH); /* clear GOTCL */
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004047 rnbc = rd32(E1000_RNBC);
4048 adapter->stats.rnbc += rnbc;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004049 net_stats->rx_fifo_errors += rnbc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004050 adapter->stats.ruc += rd32(E1000_RUC);
4051 adapter->stats.rfc += rd32(E1000_RFC);
4052 adapter->stats.rjc += rd32(E1000_RJC);
4053 adapter->stats.tor += rd32(E1000_TORH);
4054 adapter->stats.tot += rd32(E1000_TOTH);
4055 adapter->stats.tpr += rd32(E1000_TPR);
4056
4057 adapter->stats.ptc64 += rd32(E1000_PTC64);
4058 adapter->stats.ptc127 += rd32(E1000_PTC127);
4059 adapter->stats.ptc255 += rd32(E1000_PTC255);
4060 adapter->stats.ptc511 += rd32(E1000_PTC511);
4061 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4062 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4063
4064 adapter->stats.mptc += rd32(E1000_MPTC);
4065 adapter->stats.bptc += rd32(E1000_BPTC);
4066
4067 /* used for adaptive IFS */
Auke Kok9d5c8242008-01-24 02:22:38 -08004068 hw->mac.tx_packet_delta = rd32(E1000_TPT);
4069 adapter->stats.tpt += hw->mac.tx_packet_delta;
4070 hw->mac.collision_delta = rd32(E1000_COLC);
4071 adapter->stats.colc += hw->mac.collision_delta;
4072
4073 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
4074 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4075 adapter->stats.tncrs += rd32(E1000_TNCRS);
4076 adapter->stats.tsctc += rd32(E1000_TSCTC);
4077 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4078
4079 adapter->stats.iac += rd32(E1000_IAC);
4080 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4081 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4082 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4083 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4084 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4085 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4086 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4087 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4088
4089 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004090 net_stats->multicast = adapter->stats.mprc;
4091 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004092
4093 /* Rx Errors */
4094
4095 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004096 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004097 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004098 adapter->stats.crcerrs + adapter->stats.algnerrc +
4099 adapter->stats.ruc + adapter->stats.roc +
4100 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004101 net_stats->rx_length_errors = adapter->stats.ruc +
4102 adapter->stats.roc;
4103 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4104 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4105 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004106
4107 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004108 net_stats->tx_errors = adapter->stats.ecol +
4109 adapter->stats.latecol;
4110 net_stats->tx_aborted_errors = adapter->stats.ecol;
4111 net_stats->tx_window_errors = adapter->stats.latecol;
4112 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004113
4114 /* Tx Dropped needs to be maintained elsewhere */
4115
4116 /* Phy Stats */
4117 if (hw->phy.media_type == e1000_media_type_copper) {
4118 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004119 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004120 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4121 adapter->phy_stats.idle_errors += phy_tmp;
4122 }
4123 }
4124
4125 /* Management Stats */
4126 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4127 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4128 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4129}
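/*
 * Illustrative sketch (hypothetical helper): the hardware statistics
 * registers read above are clear-on-read, which is why every counter is
 * accumulated with "+=" rather than assigned. For the split octet
 * counters only the low half is accumulated; the high half is read back
 * solely for its read-to-clear side effect, as the in-line comments on
 * GORCH/GOTCH note.
 */
static inline void igb_example_accumulate_gorc(struct e1000_hw *hw, u64 *gorc)
{
	*gorc += rd32(E1000_GORCL);	/* accumulate the low 32 bits */
	rd32(E1000_GORCH);		/* read-to-clear the counter pair */
}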
4130
Auke Kok9d5c8242008-01-24 02:22:38 -08004131static irqreturn_t igb_msix_other(int irq, void *data)
4132{
Alexander Duyck047e0032009-10-27 15:49:27 +00004133 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004134 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004135 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004136 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004137
Alexander Duyck7f081d42010-01-07 17:41:00 +00004138 if (icr & E1000_ICR_DRSTA)
4139 schedule_work(&adapter->reset_task);
4140
Alexander Duyck047e0032009-10-27 15:49:27 +00004141 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004142 /* HW is reporting DMA is out of sync */
4143 adapter->stats.doosync++;
4144 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004145
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004146 /* Check for a mailbox event */
4147 if (icr & E1000_ICR_VMMB)
4148 igb_msg_task(adapter);
4149
4150 if (icr & E1000_ICR_LSC) {
4151 hw->mac.get_link_status = 1;
4152 /* guard against interrupt when we're going down */
4153 if (!test_bit(__IGB_DOWN, &adapter->state))
4154 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4155 }
4156
Alexander Duyck25568a52009-10-27 23:49:59 +00004157 if (adapter->vfs_allocated_count)
4158 wr32(E1000_IMS, E1000_IMS_LSC |
4159 E1000_IMS_VMMB |
4160 E1000_IMS_DOUTSYNC);
4161 else
4162 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004163 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004164
4165 return IRQ_HANDLED;
4166}
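/*
 * For reference, a summary (illustrative, using only defines already
 * referenced in this file) of the causes the other-cause vector above
 * demultiplexes from ICR:
 *
 *	E1000_ICR_DRSTA    - device reset asserted -> schedule reset_task
 *	E1000_ICR_DOUTSYNC - DMA out of sync       -> bump the doosync stat
 *	E1000_ICR_VMMB     - VF mailbox event      -> igb_msg_task()
 *	E1000_ICR_LSC      - link status change    -> arm the watchdog timer
 */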
4167
Alexander Duyck047e0032009-10-27 15:49:27 +00004168static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004169{
Alexander Duyck26b39272010-02-17 01:00:41 +00004170 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004171 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004172
Alexander Duyck047e0032009-10-27 15:49:27 +00004173 if (!q_vector->set_itr)
4174 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004175
Alexander Duyck047e0032009-10-27 15:49:27 +00004176 if (!itr_val)
4177 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004178
Alexander Duyck26b39272010-02-17 01:00:41 +00004179 if (adapter->hw.mac.type == e1000_82575)
4180 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004181 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004182 itr_val |= 0x8000000;
4183
4184 writel(itr_val, q_vector->itr_register);
4185 q_vector->set_itr = 0;
4186}
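/*
 * Illustrative sketch (hypothetical helper): the EITR formatting rule
 * from igb_write_itr() as a pure function. 82575 parts expect the
 * interval replicated into both halves of the register, while 82576 and
 * newer take it once together with the 0x8000000 flag written above;
 * the exact meaning of that bit is left to the datasheet.
 */
static inline u32 igb_example_format_eitr(enum e1000_mac_type type,
					  u32 itr_val)
{
	itr_val &= 0x7FFC;		/* interval field, as masked above */
	if (!itr_val)
		itr_val = 0x4;		/* never write a zero interval */
	if (type == e1000_82575)
		return itr_val | (itr_val << 16);
	return itr_val | 0x8000000;
}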
4187
4188static irqreturn_t igb_msix_ring(int irq, void *data)
4189{
4190 struct igb_q_vector *q_vector = data;
4191
4192 /* Write the ITR value calculated from the previous interrupt. */
4193 igb_write_itr(q_vector);
4194
4195 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004196
Auke Kok9d5c8242008-01-24 02:22:38 -08004197 return IRQ_HANDLED;
4198}
4199
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004200#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004201static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004202{
Alexander Duyck047e0032009-10-27 15:49:27 +00004203 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004204 struct e1000_hw *hw = &adapter->hw;
4205 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004206
Alexander Duyck047e0032009-10-27 15:49:27 +00004207 if (q_vector->cpu == cpu)
4208 goto out_no_update;
4209
4210 if (q_vector->tx_ring) {
4211 int q = q_vector->tx_ring->reg_idx;
4212 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4213 if (hw->mac.type == e1000_82575) {
4214 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4215 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4216 } else {
4217 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4218 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4219 E1000_DCA_TXCTRL_CPUID_SHIFT;
4220 }
4221 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4222 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4223 }
4224 if (q_vector->rx_ring) {
4225 int q = q_vector->rx_ring->reg_idx;
4226 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4227 if (hw->mac.type == e1000_82575) {
4228 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4229 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4230 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004231 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004232 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004233 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004234 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004235 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4236 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4237 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4238 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004239 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004240 q_vector->cpu = cpu;
4241out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004242 put_cpu();
4243}
4244
4245static void igb_setup_dca(struct igb_adapter *adapter)
4246{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004247 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004248 int i;
4249
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004250 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004251 return;
4252
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004253 /* Always use CB2 mode, difference is masked in the CB driver. */
4254 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4255
Alexander Duyck047e0032009-10-27 15:49:27 +00004256 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004257 adapter->q_vector[i]->cpu = -1;
4258 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004259 }
4260}
4261
4262static int __igb_notify_dca(struct device *dev, void *data)
4263{
4264 struct net_device *netdev = dev_get_drvdata(dev);
4265 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004266 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004267 struct e1000_hw *hw = &adapter->hw;
4268 unsigned long event = *(unsigned long *)data;
4269
4270 switch (event) {
4271 case DCA_PROVIDER_ADD:
4272 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004273 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004274 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004275 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004276 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004277 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004278 igb_setup_dca(adapter);
4279 break;
4280 }
4281 /* Fall Through since DCA is disabled. */
4282 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004283 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004284 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004285 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004286 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004287 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004288 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004289 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004290 }
4291 break;
4292 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004293
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004294 return 0;
4295}
4296
4297static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4298 void *p)
4299{
4300 int ret_val;
4301
4302 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4303 __igb_notify_dca);
4304
4305 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4306}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004307#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004308
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004309static void igb_ping_all_vfs(struct igb_adapter *adapter)
4310{
4311 struct e1000_hw *hw = &adapter->hw;
4312 u32 ping;
4313 int i;
4314
4315 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4316 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004317 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004318 ping |= E1000_VT_MSGTYPE_CTS;
4319 igb_write_mbx(hw, &ping, 1, i);
4320 }
4321}
4322
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004323static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4324{
4325 struct e1000_hw *hw = &adapter->hw;
4326 u32 vmolr = rd32(E1000_VMOLR(vf));
4327 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4328
4329 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
4330 IGB_VF_FLAG_MULTI_PROMISC);
4331 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4332
4333 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4334 vmolr |= E1000_VMOLR_MPME;
4335 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4336 } else {
4337 /*
4338 * if we have hashes and we are clearing a multicast promisc
4339 * flag we need to write the hashes to the MTA as this step
4340 * was previously skipped
4341 */
4342 if (vf_data->num_vf_mc_hashes > 30) {
4343 vmolr |= E1000_VMOLR_MPME;
4344 } else if (vf_data->num_vf_mc_hashes) {
4345 int j;
4346 vmolr |= E1000_VMOLR_ROMPE;
4347 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4348 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4349 }
4350 }
4351
4352 wr32(E1000_VMOLR(vf), vmolr);
4353
4354 /* there are flags left unprocessed, likely not supported */
4355 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4356 return -EINVAL;
4357
4358 return 0;
4359
4360}
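/*
 * Illustrative sketch of the message the handler above parses. The real
 * sender is the VF side of the mailbox (the separate igbvf driver); this
 * hypothetical helper only shows the word layout: the opcode in the low
 * 16 bits plus the multicast-promiscuous modifier bit.
 */
static inline void igb_example_build_promisc_msg(u32 *msgbuf)
{
	msgbuf[0] = E1000_VF_SET_PROMISC | E1000_VF_SET_PROMISC_MULTICAST;
}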
4361
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004362static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4363 u32 *msgbuf, u32 vf)
4364{
4365 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4366 u16 *hash_list = (u16 *)&msgbuf[1];
4367 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4368 int i;
4369
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004370 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004371	 * to this VF for later use to restore when the PF multicast
4372 * list changes
4373 */
4374 vf_data->num_vf_mc_hashes = n;
4375
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004376 /* only up to 30 hash values supported */
4377 if (n > 30)
4378 n = 30;
4379
4380 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004381 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07004382 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004383
4384 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004385 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004386
4387 return 0;
4388}
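/*
 * Illustrative sketch: layout of the E1000_VF_SET_MULTICAST message
 * consumed above. The entry count rides in the MSGINFO field of word 0
 * and the 16-bit hash values are packed two per 32-bit word from word 1
 * onward, which is why the handler may view &msgbuf[1] as a u16 array.
 * Hypothetical decoder for the count:
 */
static inline int igb_example_mc_msg_count(const u32 *msgbuf)
{
	return (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
}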
4389
4390static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4391{
4392 struct e1000_hw *hw = &adapter->hw;
4393 struct vf_data_storage *vf_data;
4394 int i, j;
4395
4396 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004397 u32 vmolr = rd32(E1000_VMOLR(i));
4398 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4399
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004400 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004401
4402 if ((vf_data->num_vf_mc_hashes > 30) ||
4403 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4404 vmolr |= E1000_VMOLR_MPME;
4405 } else if (vf_data->num_vf_mc_hashes) {
4406 vmolr |= E1000_VMOLR_ROMPE;
4407 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4408 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4409 }
4410 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004411 }
4412}
4413
4414static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4415{
4416 struct e1000_hw *hw = &adapter->hw;
4417 u32 pool_mask, reg, vid;
4418 int i;
4419
4420 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4421
4422 /* Find the vlan filter for this id */
4423 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4424 reg = rd32(E1000_VLVF(i));
4425
4426 /* remove the vf from the pool */
4427 reg &= ~pool_mask;
4428
4429 /* if pool is empty then remove entry from vfta */
4430 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4431 (reg & E1000_VLVF_VLANID_ENABLE)) {
4432			vid = reg & E1000_VLVF_VLANID_MASK;
4433			igb_vfta_set(hw, vid, false);
4434			reg = 0;
4435 }
4436
4437 wr32(E1000_VLVF(i), reg);
4438 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004439
4440 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004441}
4442
4443static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4444{
4445 struct e1000_hw *hw = &adapter->hw;
4446 u32 reg, i;
4447
Alexander Duyck51466232009-10-27 23:47:35 +00004448 /* The vlvf table only exists on 82576 hardware and newer */
4449 if (hw->mac.type < e1000_82576)
4450 return -1;
4451
4452 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004453 if (!adapter->vfs_allocated_count)
4454 return -1;
4455
4456 /* Find the vlan filter for this id */
4457 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4458 reg = rd32(E1000_VLVF(i));
4459 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4460 vid == (reg & E1000_VLVF_VLANID_MASK))
4461 break;
4462 }
4463
4464 if (add) {
4465 if (i == E1000_VLVF_ARRAY_SIZE) {
4466 /* Did not find a matching VLAN ID entry that was
4467 * enabled. Search for a free filter entry, i.e.
4468 * one without the enable bit set
4469 */
4470 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4471 reg = rd32(E1000_VLVF(i));
4472 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4473 break;
4474 }
4475 }
4476 if (i < E1000_VLVF_ARRAY_SIZE) {
4477 /* Found an enabled/available entry */
4478 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4479
4480 /* if !enabled we need to set this up in vfta */
4481 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00004482 /* add VID to filter table */
4483 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004484 reg |= E1000_VLVF_VLANID_ENABLE;
4485 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00004486 reg &= ~E1000_VLVF_VLANID_MASK;
4487 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004488 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004489
4490 /* do not modify RLPML for PF devices */
4491 if (vf >= adapter->vfs_allocated_count)
4492 return 0;
4493
4494 if (!adapter->vf_data[vf].vlans_enabled) {
4495 u32 size;
4496 reg = rd32(E1000_VMOLR(vf));
4497 size = reg & E1000_VMOLR_RLPML_MASK;
4498 size += 4;
4499 reg &= ~E1000_VMOLR_RLPML_MASK;
4500 reg |= size;
4501 wr32(E1000_VMOLR(vf), reg);
4502 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004503
Alexander Duyck51466232009-10-27 23:47:35 +00004504 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004505 return 0;
4506 }
4507 } else {
4508 if (i < E1000_VLVF_ARRAY_SIZE) {
4509 /* remove vf from the pool */
4510 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4511 /* if pool is empty then remove entry from vfta */
4512 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4513 reg = 0;
4514 igb_vfta_set(hw, vid, false);
4515 }
4516 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004517
4518 /* do not modify RLPML for PF devices */
4519 if (vf >= adapter->vfs_allocated_count)
4520 return 0;
4521
4522 adapter->vf_data[vf].vlans_enabled--;
4523 if (!adapter->vf_data[vf].vlans_enabled) {
4524 u32 size;
4525 reg = rd32(E1000_VMOLR(vf));
4526 size = reg & E1000_VMOLR_RLPML_MASK;
4527 size -= 4;
4528 reg &= ~E1000_VMOLR_RLPML_MASK;
4529 reg |= size;
4530 wr32(E1000_VMOLR(vf), reg);
4531 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004532 }
4533 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00004534 return 0;
4535}
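/*
 * Illustrative sketch (hypothetical helper) of the VLVF entry layout the
 * function above manipulates: the VLAN ID in the low bits, a per-pool
 * membership bitmap starting at E1000_VLVF_POOLSEL_SHIFT, and
 * E1000_VLVF_VLANID_ENABLE marking the entry as in use.
 */
static inline bool igb_example_vlvf_has_vf(u32 vlvf, u32 vf)
{
	if (!(vlvf & E1000_VLVF_VLANID_ENABLE))
		return false;	/* entry not in use */
	return !!(vlvf & (1 << (E1000_VLVF_POOLSEL_SHIFT + vf)));
}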
4536
4537static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4538{
4539 struct e1000_hw *hw = &adapter->hw;
4540
4541 if (vid)
4542 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
4543 else
4544 wr32(E1000_VMVIR(vf), 0);
4545}
4546
4547static int igb_ndo_set_vf_vlan(struct net_device *netdev,
4548 int vf, u16 vlan, u8 qos)
4549{
4550 int err = 0;
4551 struct igb_adapter *adapter = netdev_priv(netdev);
4552
4553 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
4554 return -EINVAL;
4555 if (vlan || qos) {
4556 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
4557 if (err)
4558 goto out;
4559 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
4560 igb_set_vmolr(adapter, vf, !vlan);
4561 adapter->vf_data[vf].pf_vlan = vlan;
4562 adapter->vf_data[vf].pf_qos = qos;
4563 dev_info(&adapter->pdev->dev,
4564 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
4565 if (test_bit(__IGB_DOWN, &adapter->state)) {
4566 dev_warn(&adapter->pdev->dev,
4567 "The VF VLAN has been set,"
4568 " but the PF device is not up.\n");
4569 dev_warn(&adapter->pdev->dev,
4570 "Bring the PF device up before"
4571 " attempting to use the VF device.\n");
4572 }
4573 } else {
4574 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
4575 false, vf);
4576 igb_set_vmvir(adapter, vlan, vf);
4577 igb_set_vmolr(adapter, vf, true);
4578 adapter->vf_data[vf].pf_vlan = 0;
4579 adapter->vf_data[vf].pf_qos = 0;
4580 }
4581out:
4582 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004583}
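/*
 * Usage sketch: this ndo is what iproute2 reaches when an administrator
 * pins a VF to a VLAN from the PF, e.g.
 *
 *	ip link set eth0 vf 0 vlan 100 qos 3
 *
 * (the interface name and values here are examples only). Requesting
 * "vlan 0" takes the else branch above and releases the PF-set VLAN.
 */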
4584
4585static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4586{
4587 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4588 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4589
4590 return igb_vlvf_set(adapter, vid, add, vf);
4591}
4592
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004593static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004594{
Williams, Mitch A8151d292010-02-10 01:44:24 +00004595	/* clear flags, except the one noting that the PF set the MAC */
4596	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004597 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004598
4599 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004600 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004601
4602 /* reset vlans for device */
4603 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00004604 if (adapter->vf_data[vf].pf_vlan)
4605 igb_ndo_set_vf_vlan(adapter->netdev, vf,
4606 adapter->vf_data[vf].pf_vlan,
4607 adapter->vf_data[vf].pf_qos);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004610
4611 /* reset multicast table array for vf */
4612 adapter->vf_data[vf].num_vf_mc_hashes = 0;
4613
4614 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004615 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004616}
4617
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004618static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4619{
4620 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4621
4622 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004623 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
4624 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004625
4626 /* process remaining reset events */
4627 igb_vf_reset(adapter, vf);
4628}
4629
4630static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004631{
4632 struct e1000_hw *hw = &adapter->hw;
4633 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004634 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004635 u32 reg, msgbuf[3];
4636 u8 *addr = (u8 *)(&msgbuf[1]);
4637
4638 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004639 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004640
4641 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00004642 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004643
4644 /* enable transmit and receive for vf */
4645 reg = rd32(E1000_VFTE);
4646 wr32(E1000_VFTE, reg | (1 << vf));
4647 reg = rd32(E1000_VFRE);
4648 wr32(E1000_VFRE, reg | (1 << vf));
4649
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004650	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004651
4652 /* reply to reset with ack and vf mac address */
4653 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4654 memcpy(addr, vf_mac, 6);
4655 igb_write_mbx(hw, msgbuf, 3, vf);
4656}
4657
4658static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4659{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004660	unsigned char *addr = (unsigned char *)&msg[1];
4661 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004662
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004663 if (is_valid_ether_addr(addr))
4664 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004665
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004666 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004667}
4668
4669static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4670{
4671 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004672 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004673 u32 msg = E1000_VT_MSGTYPE_NACK;
4674
4675 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004676 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4677 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004678 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004679 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004680 }
4681}
4682
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004683static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004684{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004685 struct pci_dev *pdev = adapter->pdev;
4686 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004687 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004688 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004689 s32 retval;
4690
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004691 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004692
Alexander Duyckfef45f42009-12-11 22:57:34 -08004693 if (retval) {
4694 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004695 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08004696 vf_data->flags &= ~IGB_VF_FLAG_CTS;
4697 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4698 return;
4699 goto out;
4700 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004701
4702 /* this is a message we already processed, do nothing */
4703 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004704 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004705
4706 /*
4707 * until the vf completes a reset it should not be
4708 * allowed to start any configuration.
4709 */
4710
4711 if (msgbuf[0] == E1000_VF_RESET) {
4712 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004713 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004714 }
4715
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004716 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08004717 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4718 return;
4719 retval = -1;
4720 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004721 }
4722
4723 switch ((msgbuf[0] & 0xFFFF)) {
4724 case E1000_VF_SET_MAC_ADDR:
4725 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4726 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004727 case E1000_VF_SET_PROMISC:
4728 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4729 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004730 case E1000_VF_SET_MULTICAST:
4731 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4732 break;
4733 case E1000_VF_SET_LPE:
4734 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4735 break;
4736 case E1000_VF_SET_VLAN:
Williams, Mitch A8151d292010-02-10 01:44:24 +00004737 if (adapter->vf_data[vf].pf_vlan)
4738 retval = -1;
4739 else
4740 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004741 break;
4742 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00004743 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004744 retval = -1;
4745 break;
4746 }
4747
Alexander Duyckfef45f42009-12-11 22:57:34 -08004748 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4749out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004750 /* notify the VF of the results of what it sent us */
4751 if (retval)
4752 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4753 else
4754 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4755
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004756 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004757}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004758
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004759static void igb_msg_task(struct igb_adapter *adapter)
4760{
4761 struct e1000_hw *hw = &adapter->hw;
4762 u32 vf;
4763
4764 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4765 /* process any reset requests */
4766 if (!igb_check_for_rst(hw, vf))
4767 igb_vf_reset_event(adapter, vf);
4768
4769 /* process any messages pending */
4770 if (!igb_check_for_msg(hw, vf))
4771 igb_rcv_msg_from_vf(adapter, vf);
4772
4773 /* process any acks */
4774 if (!igb_check_for_ack(hw, vf))
4775 igb_rcv_ack_from_vf(adapter, vf);
4776 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004777}
4778
Auke Kok9d5c8242008-01-24 02:22:38 -08004779/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00004780 * igb_set_uta - Set unicast filter table address
4781 * @adapter: board private structure
4782 *
4783 * The unicast table address is a register array of 32-bit registers.
4784 * The table is meant to be used in a way similar to how the MTA is used,
4785 * however due to certain limitations in the hardware it is necessary to
4786 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
4787 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
4788 **/
4789static void igb_set_uta(struct igb_adapter *adapter)
4790{
4791 struct e1000_hw *hw = &adapter->hw;
4792 int i;
4793
4794 /* The UTA table only exists on 82576 hardware and newer */
4795 if (hw->mac.type < e1000_82576)
4796 return;
4797
4798 /* we only need to do this if VMDq is enabled */
4799 if (!adapter->vfs_allocated_count)
4800 return;
4801
4802 for (i = 0; i < hw->mac.uta_reg_count; i++)
4803 array_wr32(E1000_UTA, i, ~0);
4804}
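/*
 * Illustrative sketch (hypothetical helper): with every UTA bit forced
 * to 1 above, per-VF unicast promiscuity reduces to toggling the VMOLR
 * ROPE bit for that pool.
 */
static inline void igb_example_set_uni_promisc(struct e1000_hw *hw,
					       int vf, bool enable)
{
	u32 vmolr = rd32(E1000_VMOLR(vf));

	if (enable)
		vmolr |= E1000_VMOLR_ROPE;	/* accept UTA-hashed unicast */
	else
		vmolr &= ~E1000_VMOLR_ROPE;
	wr32(E1000_VMOLR(vf), vmolr);
}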
4805
4806/**
Auke Kok9d5c8242008-01-24 02:22:38 -08004807 * igb_intr_msi - Interrupt Handler
4808 * @irq: interrupt number
4809 * @data: pointer to a network interface device structure
4810 **/
4811static irqreturn_t igb_intr_msi(int irq, void *data)
4812{
Alexander Duyck047e0032009-10-27 15:49:27 +00004813 struct igb_adapter *adapter = data;
4814 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004815 struct e1000_hw *hw = &adapter->hw;
4816 /* read ICR disables interrupts using IAM */
4817 u32 icr = rd32(E1000_ICR);
4818
Alexander Duyck047e0032009-10-27 15:49:27 +00004819 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004820
Alexander Duyck7f081d42010-01-07 17:41:00 +00004821 if (icr & E1000_ICR_DRSTA)
4822 schedule_work(&adapter->reset_task);
4823
Alexander Duyck047e0032009-10-27 15:49:27 +00004824 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004825 /* HW is reporting DMA is out of sync */
4826 adapter->stats.doosync++;
4827 }
4828
Auke Kok9d5c8242008-01-24 02:22:38 -08004829 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4830 hw->mac.get_link_status = 1;
4831 if (!test_bit(__IGB_DOWN, &adapter->state))
4832 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4833 }
4834
Alexander Duyck047e0032009-10-27 15:49:27 +00004835 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004836
4837 return IRQ_HANDLED;
4838}
4839
4840/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00004841 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08004842 * @irq: interrupt number
4843 * @data: pointer to a network interface device structure
4844 **/
4845static irqreturn_t igb_intr(int irq, void *data)
4846{
Alexander Duyck047e0032009-10-27 15:49:27 +00004847 struct igb_adapter *adapter = data;
4848 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08004849 struct e1000_hw *hw = &adapter->hw;
4850 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4851 * need for the IMC write */
4852 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08004853 if (!icr)
4854 return IRQ_NONE; /* Not our interrupt */
4855
Alexander Duyck047e0032009-10-27 15:49:27 +00004856 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004857
4858 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4859 * not set, then the adapter didn't send an interrupt */
4860 if (!(icr & E1000_ICR_INT_ASSERTED))
4861 return IRQ_NONE;
4862
Alexander Duyck7f081d42010-01-07 17:41:00 +00004863 if (icr & E1000_ICR_DRSTA)
4864 schedule_work(&adapter->reset_task);
4865
Alexander Duyck047e0032009-10-27 15:49:27 +00004866 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004867 /* HW is reporting DMA is out of sync */
4868 adapter->stats.doosync++;
4869 }
4870
Auke Kok9d5c8242008-01-24 02:22:38 -08004871 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4872 hw->mac.get_link_status = 1;
4873 /* guard against interrupt when we're going down */
4874 if (!test_bit(__IGB_DOWN, &adapter->state))
4875 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4876 }
4877
Alexander Duyck047e0032009-10-27 15:49:27 +00004878 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08004879
4880 return IRQ_HANDLED;
4881}
4882
Alexander Duyck047e0032009-10-27 15:49:27 +00004883static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08004884{
Alexander Duyck047e0032009-10-27 15:49:27 +00004885 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08004886 struct e1000_hw *hw = &adapter->hw;
4887
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00004888 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4889 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
Alexander Duyck047e0032009-10-27 15:49:27 +00004890 if (!adapter->msix_entries)
Alexander Duyck46544252009-02-19 20:39:04 -08004891 igb_set_itr(adapter);
4892 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004893 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004894 }
4895
4896 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4897 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00004898 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08004899 else
4900 igb_irq_enable(adapter);
4901 }
4902}
4903
Auke Kok9d5c8242008-01-24 02:22:38 -08004904/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004905 * igb_poll - NAPI Rx polling callback
4906 * @napi: napi polling structure
4907 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08004908 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004909static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08004910{
Alexander Duyck047e0032009-10-27 15:49:27 +00004911 struct igb_q_vector *q_vector = container_of(napi,
4912 struct igb_q_vector,
4913 napi);
4914 int tx_clean_complete = 1, work_done = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004915
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004916#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004917 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4918 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004919#endif
Alexander Duyck047e0032009-10-27 15:49:27 +00004920 if (q_vector->tx_ring)
4921 tx_clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08004922
Alexander Duyck047e0032009-10-27 15:49:27 +00004923 if (q_vector->rx_ring)
4924 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4925
4926 if (!tx_clean_complete)
4927 work_done = budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08004928
Alexander Duyck46544252009-02-19 20:39:04 -08004929 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck5e6d5b12009-03-13 20:40:38 +00004930 if (work_done < budget) {
Alexander Duyck46544252009-02-19 20:39:04 -08004931 napi_complete(napi);
Alexander Duyck047e0032009-10-27 15:49:27 +00004932 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08004933 }
4934
4935 return work_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08004936}
Al Viro6d8126f2008-03-16 22:23:24 +00004937
Auke Kok9d5c8242008-01-24 02:22:38 -08004938/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004939 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004940 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004941 * @shhwtstamps: timestamp structure to update
4942 * @regval: unsigned 64bit system time value.
4943 *
4944 * We need to convert the system time value stored in the RX/TXSTMP registers
4945 * into a hwtstamp which can be used by the upper level timestamping functions
4946 */
4947static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4948 struct skb_shared_hwtstamps *shhwtstamps,
4949 u64 regval)
4950{
4951 u64 ns;
4952
Alexander Duyck55cac242009-11-19 12:42:21 +00004953 /*
4954	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL; shift this up by
4955	 * 24 bits to match the clock shift we set up earlier.
4956 */
4957 if (adapter->hw.mac.type == e1000_82580)
4958 regval <<= IGB_82580_TSYNC_SHIFT;
4959
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004960 ns = timecounter_cyc2time(&adapter->clock, regval);
4961 timecompare_update(&adapter->compare, ns);
4962 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4963 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4964 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4965}
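/*
 * Illustrative sketch (hypothetical helper): the latched timestamps fed
 * into igb_systim_to_hwtstamp() are always assembled from two 32-bit
 * reads, low half first, exactly as the RX/TX callers below do.
 */
static inline u64 igb_example_read_rxstmp(struct e1000_hw *hw)
{
	u64 regval = rd32(E1000_RXSTMPL);

	regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	return regval;	/* pass to igb_systim_to_hwtstamp() */
}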
4966
4967/**
4968 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4969 * @q_vector: pointer to q_vector containing needed info
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004970 * @skb: packet that was just sent
4971 *
4972 * If we were asked to do hardware stamping and such a time stamp is
4973 * available, then it must have been for this skb here because we only
4974 * allow one such packet into the queue.
4975 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004976static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004977{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004978 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004979 union skb_shared_tx *shtx = skb_tx(skb);
4980 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004981 struct skb_shared_hwtstamps shhwtstamps;
4982 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004983
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004984 /* if skb does not support hw timestamp or TX stamp not valid exit */
4985 if (likely(!shtx->hardware) ||
4986 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4987 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004988
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00004989 regval = rd32(E1000_TXSTMPL);
4990 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4991
4992 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4993 skb_tstamp_tx(skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004994}
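/*
 * Usage sketch (userspace, not driver code): the stamp delivered through
 * skb_tstamp_tx() above reaches applications that enabled hardware TX
 * timestamping via SO_TIMESTAMPING, e.g.
 *
 *	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
 *		    SOF_TIMESTAMPING_SYS_HARDWARE;
 *	setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
 *		   &flags, sizeof(flags));
 *
 * after which the timestamp is read back from the socket error queue.
 */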
4995
4996/**
Auke Kok9d5c8242008-01-24 02:22:38 -08004997 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00004998 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08004999 * returns true if ring is completely cleaned
5000 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005001static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005002{
Alexander Duyck047e0032009-10-27 15:49:27 +00005003 struct igb_adapter *adapter = q_vector->adapter;
5004 struct igb_ring *tx_ring = q_vector->tx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00005005 struct net_device *netdev = tx_ring->netdev;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005006 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08005007 struct igb_buffer *buffer_info;
5008 struct sk_buff *skb;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005009 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005010 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005011 unsigned int i, eop, count = 0;
5012 bool cleaned = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08005013
Auke Kok9d5c8242008-01-24 02:22:38 -08005014 i = tx_ring->next_to_clean;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005015 eop = tx_ring->buffer_info[i].next_to_watch;
5016 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5017
5018 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
5019 (count < tx_ring->count)) {
5020 for (cleaned = false; !cleaned; count++) {
5021 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005022 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005023 cleaned = (i == eop);
Auke Kok9d5c8242008-01-24 02:22:38 -08005024 skb = buffer_info->skb;
5025
5026 if (skb) {
5027 unsigned int segs, bytecount;
5028 /* gso_segs is currently only valid for tcp */
5029 segs = skb_shinfo(skb)->gso_segs ?: 1;
5030 /* multiply data chunks by size of headers */
5031 bytecount = ((segs - 1) * skb_headlen(skb)) +
5032 skb->len;
5033 total_packets += segs;
5034 total_bytes += bytecount;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005035
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005036 igb_tx_hwtstamp(q_vector, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005037 }
5038
Alexander Duyck80785292009-10-27 15:51:47 +00005039 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005040 tx_desc->wb.status = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005041
5042 i++;
5043 if (i == tx_ring->count)
5044 i = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005045 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005046 eop = tx_ring->buffer_info[i].next_to_watch;
5047 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5048 }
5049
Auke Kok9d5c8242008-01-24 02:22:38 -08005050 tx_ring->next_to_clean = i;
5051
Alexander Duyckfc7d3452008-08-26 04:25:08 -07005052 if (unlikely(count &&
Auke Kok9d5c8242008-01-24 02:22:38 -08005053 netif_carrier_ok(netdev) &&
Alexander Duyckc493ea42009-03-20 00:16:50 +00005054 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005055 /* Make sure that anybody stopping the queue after this
5056 * sees the new next_to_clean.
5057 */
5058 smp_mb();
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005059 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5060 !(test_bit(__IGB_DOWN, &adapter->state))) {
5061 netif_wake_subqueue(netdev, tx_ring->queue_index);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005062 tx_ring->tx_stats.restart_queue++;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005063 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005064 }
5065
5066 if (tx_ring->detect_tx_hung) {
5067		/* Detect a transmit hang in hardware; this serializes the
5068 * check with the clearing of time_stamp and movement of i */
5069 tx_ring->detect_tx_hung = false;
5070 if (tx_ring->buffer_info[i].time_stamp &&
5071 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005072 (adapter->tx_timeout_factor * HZ)) &&
5073 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005074
Auke Kok9d5c8242008-01-24 02:22:38 -08005075 /* detected Tx unit hang */
Alexander Duyck80785292009-10-27 15:51:47 +00005076 dev_err(&tx_ring->pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005077 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005078 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005079 " TDH <%x>\n"
5080 " TDT <%x>\n"
5081 " next_to_use <%x>\n"
5082 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005083 "buffer_info[next_to_clean]\n"
5084 " time_stamp <%lx>\n"
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005085 " next_to_watch <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005086 " jiffies <%lx>\n"
5087 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005088 tx_ring->queue_index,
Alexander Duyckfce99e32009-10-27 15:51:27 +00005089 readl(tx_ring->head),
5090 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005091 tx_ring->next_to_use,
5092 tx_ring->next_to_clean,
Alexander Duyckf7ba2052009-10-27 23:48:51 +00005093 tx_ring->buffer_info[eop].time_stamp,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005094 eop,
Auke Kok9d5c8242008-01-24 02:22:38 -08005095 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005096 eop_desc->wb.status);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005097 netif_stop_subqueue(netdev, tx_ring->queue_index);
Auke Kok9d5c8242008-01-24 02:22:38 -08005098 }
5099 }
5100 tx_ring->total_bytes += total_bytes;
5101 tx_ring->total_packets += total_packets;
Alexander Duycke21ed352008-07-08 15:07:24 -07005102 tx_ring->tx_stats.bytes += total_bytes;
5103 tx_ring->tx_stats.packets += total_packets;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005104 return (count < tx_ring->count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005105}
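/*
 * Worked example (illustrative numbers) for the TSO accounting above: an
 * skb carrying 2920 bytes of TCP payload as two 1460-byte segments has
 * gso_segs = 2 and, say, skb_headlen() = 66 bytes of headers, so
 * skb->len = 2986 and bytecount = (2 - 1) * 66 + 2986 = 3052, i.e. the
 * two 1526-byte frames that hit the wire, one replicated header per
 * extra segment.
 */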
5106
Auke Kok9d5c8242008-01-24 02:22:38 -08005107/**
5108 * igb_receive_skb - helper function to handle rx indications
Alexander Duyck047e0032009-10-27 15:49:27 +00005109 * @q_vector: structure containing interrupt and ring information
5110 * @skb: packet to send up
5111 * @vlan_tag: vlan tag for packet
Auke Kok9d5c8242008-01-24 02:22:38 -08005112 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005113static void igb_receive_skb(struct igb_q_vector *q_vector,
5114 struct sk_buff *skb,
5115 u16 vlan_tag)
Auke Kok9d5c8242008-01-24 02:22:38 -08005116{
Alexander Duyck047e0032009-10-27 15:49:27 +00005117 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyckd3352522008-07-08 15:12:13 -07005118
Alexander Duyck047e0032009-10-27 15:49:27 +00005119 if (vlan_tag)
5120 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5121 vlan_tag, skb);
Alexander Duyck182ff8d2009-04-27 22:35:33 +00005122 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005123 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005124}
5125
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005126static inline void igb_rx_checksum_adv(struct igb_ring *ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08005127 u32 status_err, struct sk_buff *skb)
5128{
5129 skb->ip_summed = CHECKSUM_NONE;
5130
5131 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005132 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5133 (status_err & E1000_RXD_STAT_IXSM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005134 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005135
Auke Kok9d5c8242008-01-24 02:22:38 -08005136 /* TCP/UDP checksum error bit is set */
5137 if (status_err &
5138 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005139 /*
5140 * work around errata with sctp packets where the TCPE aka
5141 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5142 * packets, (aka let the stack check the crc32c)
5143 */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005144 if ((skb->len == 60) &&
5145 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005146 ring->rx_stats.csum_err++;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005147
Auke Kok9d5c8242008-01-24 02:22:38 -08005148 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005149 return;
5150 }
5151 /* It must be a TCP or UDP packet with a valid checksum */
5152 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5153 skb->ip_summed = CHECKSUM_UNNECESSARY;
5154
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005155 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005156}
5157
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005158static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5159 struct sk_buff *skb)
5160{
5161 struct igb_adapter *adapter = q_vector->adapter;
5162 struct e1000_hw *hw = &adapter->hw;
5163 u64 regval;
5164
5165 /*
5166 * If this bit is set, then the RX registers contain the time stamp. No
5167 * other packet will be time stamped until we read these registers, so
5168 * read the registers to make them available again. Because only one
5169 * packet can be time stamped at a time, we know that the register
5170 * values must belong to this one here and therefore we don't need to
5171 * compare any of the additional attributes stored for it.
5172 *
5173 * If nothing went wrong, then it should have a skb_shared_tx that we
5174 * can turn into a skb_shared_hwtstamps.
5175 */
5176 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
5177 return;
5178 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5179 return;
5180
5181 regval = rd32(E1000_RXSTMPL);
5182 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5183
5184 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5185}
Alexander Duyck4c844852009-10-27 15:52:07 +00005186static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005187 union e1000_adv_rx_desc *rx_desc)
5188{
5189 /* HW will not DMA in data larger than the given buffer, even if it
5190 * parses the (NFS, of course) header to be larger. In that case, it
5191 * fills the header buffer and spills the rest into the page.
5192 */
5193 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5194 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck4c844852009-10-27 15:52:07 +00005195 if (hlen > rx_ring->rx_buffer_len)
5196 hlen = rx_ring->rx_buffer_len;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005197 return hlen;
5198}
5199
Alexander Duyck047e0032009-10-27 15:49:27 +00005200static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5201 int *work_done, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005202{
Alexander Duyck047e0032009-10-27 15:49:27 +00005203 struct igb_ring *rx_ring = q_vector->rx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00005204 struct net_device *netdev = rx_ring->netdev;
Alexander Duyck80785292009-10-27 15:51:47 +00005205 struct pci_dev *pdev = rx_ring->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08005206	union e1000_adv_rx_desc *rx_desc, *next_rxd;
5207	struct igb_buffer *buffer_info, *next_buffer;
5208 struct sk_buff *skb;
Auke Kok9d5c8242008-01-24 02:22:38 -08005209 bool cleaned = false;
5210 int cleaned_count = 0;
Alexander Duyckd1eff352009-11-12 18:38:35 +00005211 int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005212 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00005213 unsigned int i;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005214 u32 staterr;
5215 u16 length;
Alexander Duyck047e0032009-10-27 15:49:27 +00005216 u16 vlan_tag;
Auke Kok9d5c8242008-01-24 02:22:38 -08005217
5218 i = rx_ring->next_to_clean;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005219 buffer_info = &rx_ring->buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08005220 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5221 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5222
5223 while (staterr & E1000_RXD_STAT_DD) {
5224 if (*work_done >= budget)
5225 break;
5226 (*work_done)++;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005227
5228 skb = buffer_info->skb;
5229 prefetch(skb->data - NET_IP_ALIGN);
5230 buffer_info->skb = NULL;
5231
5232 i++;
5233 if (i == rx_ring->count)
5234 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005235
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005236 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
5237 prefetch(next_rxd);
5238 next_buffer = &rx_ring->buffer_info[i];
5239
5240 length = le16_to_cpu(rx_desc->wb.upper.length);
5241 cleaned = true;
5242 cleaned_count++;
5243
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005244 if (buffer_info->dma) {
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005245 pci_unmap_single(pdev, buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00005246 rx_ring->rx_buffer_len,
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005247 PCI_DMA_FROMDEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00005248 buffer_info->dma = 0;
Alexander Duyck4c844852009-10-27 15:52:07 +00005249 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005250 skb_put(skb, length);
5251 goto send_up;
5252 }
Alexander Duyck4c844852009-10-27 15:52:07 +00005253 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005254 }
5255
5256 if (length) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005257 pci_unmap_page(pdev, buffer_info->page_dma,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005258 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08005259 buffer_info->page_dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005260
5261 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
5262 buffer_info->page,
5263 buffer_info->page_offset,
5264 length);
5265
Alexander Duyckd1eff352009-11-12 18:38:35 +00005266 if ((page_count(buffer_info->page) != 1) ||
5267 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005268 buffer_info->page = NULL;
5269 else
5270 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08005271
5272 skb->len += length;
5273 skb->data_len += length;
5274 skb->truesize += length;
Auke Kok9d5c8242008-01-24 02:22:38 -08005275 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005276
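                /*
                 * Not end-of-packet: stash the in-progress skb on the next
                 * buffer so the remaining fragments get appended to it when
                 * the following descriptors are cleaned.
                 */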
                if (!(staterr & E1000_RXD_STAT_EOP)) {
                        buffer_info->skb = next_buffer->skb;
                        buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        next_buffer->dma = 0;
                        goto next_desc;
                }
send_up:
                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                igb_rx_hwtstamp(q_vector, staterr, skb);
                total_bytes += skb->len;
                total_packets++;

                igb_rx_checksum_adv(rx_ring, staterr, skb);

                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, rx_ring->queue_index);

                vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
                            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

                igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
                        igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = igb_desc_unused(rx_ring);

        if (cleaned_count)
                igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

        rx_ring->total_packets += total_packets;
        rx_ring->total_bytes += total_bytes;
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
        return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
        struct net_device *netdev = rx_ring->netdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igb_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        int bufsz;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        bufsz = rx_ring->rx_buffer_len;

        while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

                if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
                                buffer_info->page = netdev_alloc_page(netdev);
                                if (!buffer_info->page) {
                                        rx_ring->rx_stats.alloc_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
                        } else {
                                buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
                                pci_map_page(rx_ring->pdev, buffer_info->page,
                                             buffer_info->page_offset,
                                             PAGE_SIZE / 2,
                                             PCI_DMA_FROMDEVICE);
                        if (pci_dma_mapping_error(rx_ring->pdev,
                                                  buffer_info->page_dma)) {
                                buffer_info->page_dma = 0;
                                rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
                }

                skb = buffer_info->skb;
                if (!skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
                                rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }

                        buffer_info->skb = skb;
                }
                if (!buffer_info->dma) {
                        buffer_info->dma = pci_map_single(rx_ring->pdev,
                                                          skb->data,
                                                          bufsz,
                                                          PCI_DMA_FROMDEVICE);
                        if (pci_dma_mapping_error(rx_ring->pdev,
                                                  buffer_info->dma)) {
                                buffer_info->dma = 0;
                                rx_ring->rx_stats.alloc_failed++;
                                goto no_buffers;
                        }
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (bufsz < IGB_RXBUFFER_1024) {
                        rx_desc->read.pkt_addr =
                                cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
                        rx_desc->read.hdr_addr = 0;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i == 0)
                        i = (rx_ring->count - 1);
                else
                        i--;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
                writel(i, rx_ring->tail);
        }
}

/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request containing the MII data
 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct mii_ioctl_data *data = if_mii(ifr);

        if (adapter->hw.phy.media_type != e1000_media_type_copper)
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = adapter->hw.phy.addr;
                break;
        case SIOCGMIIREG:
                if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
                                     &data->val_out))
                        return -EIO;
                break;
        case SIOCSMIIREG:
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request containing the hwtstamp_config
 * @cmd: ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct hwtstamp_config config;
        u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
        u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
        u32 tsync_rx_cfg = 0;
        bool is_l4 = false;
        bool is_l2 = false;
        u32 regval;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                tsync_tx_ctl = 0;
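                /* fall through - OFF has cleared the enable bit, ON keeps it */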
        case HWTSTAMP_TX_ON:
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tsync_rx_ctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_ALL:
                /*
                 * register TSYNCRXCFG must be set, therefore it is not
                 * possible to time stamp both Sync and Delay_Req messages
                 * => fall back to time stamping all packets
                 */
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                is_l2 = true;
                break;
        default:
                return -ERANGE;
        }

        if (hw->mac.type == e1000_82575) {
                if (tsync_rx_ctl | tsync_tx_ctl)
                        return -EINVAL;
                return 0;
        }

        /* enable/disable TX */
        regval = rd32(E1000_TSYNCTXCTL);
        regval &= ~E1000_TSYNCTXCTL_ENABLED;
        regval |= tsync_tx_ctl;
        wr32(E1000_TSYNCTXCTL, regval);

        /* enable/disable RX */
        regval = rd32(E1000_TSYNCRXCTL);
        regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
        regval |= tsync_rx_ctl;
        wr32(E1000_TSYNCRXCTL, regval);

        /* define which PTP packets are time stamped */
        wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

        /* define ethertype filter for timestamped packets */
        if (is_l2)
                wr32(E1000_ETQF(3),
                     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
                      E1000_ETQF_1588 | /* enable timestamping */
                      ETH_P_1588));     /* 1588 eth protocol type */
        else
                wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
        /* L4 Queue Filter[3]: filter by destination port and protocol */
        if (is_l4) {
                u32 ftqf = (IPPROTO_UDP /* UDP */
                        | E1000_FTQF_VF_BP /* VF not compared */
                        | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
                        | E1000_FTQF_MASK); /* mask all inputs */
                ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

                wr32(E1000_IMIR(3), htons(PTP_PORT));
                wr32(E1000_IMIREXT(3),
                     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
                if (hw->mac.type == e1000_82576) {
                        /* enable source port check */
                        wr32(E1000_SPQF(3), htons(PTP_PORT));
                        ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
                }
                wr32(E1000_FTQF(3), ftqf);
        } else {
                wr32(E1000_FTQF(3), E1000_FTQF_MASK);
        }
        wrfl();

        adapter->hwtstamp_config = config;

        /* clear TX/RX time stamp registers, just to be sure */
        regval = rd32(E1000_TXSTMPH);
        regval = rd32(E1000_RXSTMPH);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

/**
 * igb_ioctl - entry point for all driver ioctls
 * @netdev: network interface device structure
 * @ifr: interface request passed in from user space
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
        case SIOCSHWTSTAMP:
                return igb_hwtstamp_ioctl(netdev, ifr, cmd);
        default:
                return -EOPNOTSUPP;
        }
}

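/* read a 16-bit register from the device's PCIe capability structure */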
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_read_config_word(adapter->pdev, cap_offset + reg, value);

        return 0;
}

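/* write a 16-bit register in the device's PCIe capability structure */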
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

        return 0;
}

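/*
 * igb_vlan_rx_register - set or clear the VLAN group and toggle hardware
 * VLAN tag insert/strip to match
 */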
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl;

        igb_irq_disable(adapter);
        adapter->vlgrp = grp;

        if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl |= E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);

                /* Disable CFI check */
                rctl = rd32(E1000_RCTL);
                rctl &= ~E1000_RCTL_CFIEN;
                wr32(E1000_RCTL, rctl);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl &= ~E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
        }

        igb_rlpml_set(adapter);

        if (!test_bit(__IGB_DOWN, &adapter->state))
                igb_irq_enable(adapter);
}

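/* igb_vlan_rx_add_vid - program a new VLAN id into the VLVF/VFTA filters */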
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;

        /* attempt to add filter to vlvf array */
        igb_vlvf_set(adapter, vid, true, pf_id);

        /* add the filter since PF can receive vlans w/o entry in vlvf */
        igb_vfta_set(hw, vid, true);
}

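/* igb_vlan_rx_kill_vid - remove a VLAN id from the VLVF/VFTA filters */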
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;
        s32 err;

        igb_irq_disable(adapter);
        vlan_group_set_device(adapter->vlgrp, vid, NULL);

        if (!test_bit(__IGB_DOWN, &adapter->state))
                igb_irq_enable(adapter);

        /* remove vlan from VLVF table array */
        err = igb_vlvf_set(adapter, vid, false, pf_id);

        /* if vid was not present in VLVF just remove it from table */
        if (err)
                igb_vfta_set(hw, vid, false);
}

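/* igb_restore_vlan - re-apply the current VLAN group and all active VLAN ids */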
static void igb_restore_vlan(struct igb_adapter *adapter)
{
        igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
                        if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        igb_vlan_rx_add_vid(adapter->netdev, vid);
                }
        }
}

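/* igb_set_spd_dplx - force the MAC to a user-requested speed/duplex pair */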
5756int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5757{
Alexander Duyck090b1792009-10-27 23:51:55 +00005758 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08005759 struct e1000_mac_info *mac = &adapter->hw.mac;
5760
5761 mac->autoneg = 0;
5762
Auke Kok9d5c8242008-01-24 02:22:38 -08005763 switch (spddplx) {
5764 case SPEED_10 + DUPLEX_HALF:
5765 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5766 break;
5767 case SPEED_10 + DUPLEX_FULL:
5768 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5769 break;
5770 case SPEED_100 + DUPLEX_HALF:
5771 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5772 break;
5773 case SPEED_100 + DUPLEX_FULL:
5774 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5775 break;
5776 case SPEED_1000 + DUPLEX_FULL:
5777 mac->autoneg = 1;
5778 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5779 break;
5780 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5781 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005782 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08005783 return -EINVAL;
5784 }
5785 return 0;
5786}
5787
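/*
 * __igb_shutdown - common suspend/shutdown path: quiesce the device,
 * program wake-on-LAN filters and report whether wake should be armed
 */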
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = adapter->wol;
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev))
                igb_close(netdev);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
#endif

        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;

        if (wufc) {
                igb_setup_rctl(adapter);
                igb_set_rx_mode(netdev);

                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
                        rctl = rd32(E1000_RCTL);
                        rctl |= E1000_RCTL_MPE;
                        wr32(E1000_RCTL, rctl);
                }

                ctrl = rd32(E1000_CTRL);
                /* advertise wake from D3Cold */
                #define E1000_CTRL_ADVD3WUC 0x00100000
                /* phy power management enable */
                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);

                /* Allow time for pending master requests to run */
                igb_disable_pcie_master(hw);

                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
        } else {
                wr32(E1000_WUC, 0);
                wr32(E1000_WUFC, 0);
        }

        *enable_wake = wufc || adapter->en_mng_pt;
        if (!*enable_wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        pci_disable_device(pdev);

        return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int retval;
        bool wake;

        retval = __igb_shutdown(pdev, &wake);
        if (retval)
                return retval;

        if (wake) {
                pci_prepare_to_sleep(pdev);
        } else {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }

        return 0;
}

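/* igb_resume - restore device state and bring the interface back up */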
static int igb_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "igb: Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        wr32(E1000_WUS, ~0);

        if (netif_running(netdev)) {
                err = igb_open(netdev);
                if (err)
                        return err;
        }

        netif_device_attach(netdev);

        return 0;
}
#endif

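/* igb_shutdown - called at system shutdown; arm wake only when powering off */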
static void igb_shutdown(struct pci_dev *pdev)
{
        bool wake;

        __igb_shutdown(pdev, &wake);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, wake);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int i;

        if (!adapter->msix_entries) {
                struct igb_q_vector *q_vector = adapter->q_vector[0];
                igb_irq_disable(adapter);
                napi_schedule(&q_vector->napi);
                return;
        }

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                wr32(E1000_EIMC, q_vector->eims_value);
                napi_schedule(&q_vector->napi);
        }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev))
                igb_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        pci_ers_result_t result;
        int err;

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);

                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);

                igb_reset(adapter);
                wr32(E1000_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
                        "failed 0x%0x\n", err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (igb_up(adapter)) {
                        dev_err(&pdev->dev, "igb_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);
}

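/*
 * igb_rar_set_qsel - write a MAC address into a receive address register
 * and associate it with the given pool/queue select
 */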
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
        u32 rar_low, rar_high;
        struct e1000_hw *hw = &adapter->hw;

        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

        /* Indicate to hardware the Address is Valid. */
        rar_high |= E1000_RAH_AV;

        if (hw->mac.type == e1000_82575)
                rar_high |= E1000_RAH_POOL_1 * qsel;
        else
                rar_high |= E1000_RAH_POOL_1 << qsel;

        wr32(E1000_RAL(index), rar_low);
        wrfl();
        wr32(E1000_RAH(index), rar_high);
        wrfl();
}

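/* igb_set_vf_mac - record and program the MAC address assigned to a VF */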
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
        struct e1000_hw *hw = &adapter->hw;
        /* VF MAC addresses start at end of receive addresses and move
         * towards the first, as a result a collision should not be possible */
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);

        memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

        igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

        return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
                return -EINVAL;
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
                 " change effective.\n");
        if (test_bit(__IGB_DOWN, &adapter->state)) {
                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
                         " but the PF device is not up.\n");
                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
                         " attempting to use the VF device.\n");
        }
        return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
        return -EOPNOTSUPP;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (vf >= adapter->vfs_allocated_count)
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
        ivi->tx_rate = 0;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        return 0;
}

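/*
 * igb_vmm_control - configure loopback and replication for virtualization;
 * replication is not supported on 82575
 */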
static void igb_vmm_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 reg;

        /* replication is not supported for 82575 */
        if (hw->mac.type == e1000_82575)
                return;

        /* enable replication vlan tag stripping */
        reg = rd32(E1000_RPLOLR);
        reg |= E1000_RPLOLR_STRVLAN;
        wr32(E1000_RPLOLR, reg);

        /* notify HW that the MAC is adding vlan tags */
        reg = rd32(E1000_DTXCTL);
        reg |= E1000_DTXCTL_VLAN_ADDED;
        wr32(E1000_DTXCTL, reg);

        if (adapter->vfs_allocated_count) {
                igb_vmdq_set_loopback_pf(hw, true);
                igb_vmdq_set_replication_pf(hw, true);
        } else {
                igb_vmdq_set_loopback_pf(hw, false);
                igb_vmdq_set_replication_pf(hw, false);
        }
}

/* igb_main.c */