/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

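/* prefix all pr_*() log messages with the module name ("igb: ...") */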
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 4
#define MIN 0
#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
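/* DRV_VERSION expands to "4.0.1-k" */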
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

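/* export the PCI ID table so userspace can autoload the module for these devices */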
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
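/* debug < 0 means use DEFAULT_MSG_ENABLE (the usual netdev msg-level convention) */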
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->skb) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1, buffer_info->skb->data,
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						page_address(buffer_info->page) +
							buffer_info->page_offset,
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
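/* Q_IDX_82576 maps i -> 0, 8, 1, 9, 2, 10, ... alternating between the two queue banks */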
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
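		/* Fall through: remaining rings (Tx, and Rx when no VFs
		 * are enabled) use the 1:1 offset mapping below.
		 */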
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
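		/* e.g. Rx queue 10 -> row 10 & 0x7 = 2, column offset
		 * (10 & 0x8) << 1 = 16
		 */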
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
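		/* e.g. Rx queue 10 -> row 10 >> 1 = 5, column offset
		 * (10 & 0x1) << 4 = 0
		 */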
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
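	/* e.g. 4 Rx queues with paired Tx handlers: 4 queue vectors
	 * plus 1 for link status = 5 vectors total
	 */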
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	rtnl_lock();
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	rtnl_unlock();
	return err;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector),
				   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}

	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

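	/*
	 * With a vector per ring available, map each ring to its own
	 * vector; otherwise share a vector between each Tx/Rx pair.
	 */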
1153 if (adapter->num_q_vectors >=
1154 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1155 for (i = 0; i < adapter->num_rx_queues; i++)
1156 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1157 for (i = 0; i < adapter->num_tx_queues; i++)
1158 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1159 } else {
1160 for (i = 0; i < adapter->num_rx_queues; i++) {
1161 if (i < adapter->num_tx_queues)
1162 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1163 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1164 }
1165 for (; i < adapter->num_tx_queues; i++)
1166 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1167 }
1168 return 0;
1169}
1170
1171/**
1172 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1173 *
1174 * This function initializes the interrupts and allocates all of the queues.
1175 **/
1176static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1177{
1178 struct pci_dev *pdev = adapter->pdev;
1179 int err;
1180
Ben Hutchings21adef32010-09-27 08:28:39 +00001181 err = igb_set_interrupt_capability(adapter);
1182 if (err)
1183 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001184
1185 err = igb_alloc_q_vectors(adapter);
1186 if (err) {
1187 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1188 goto err_alloc_q_vectors;
1189 }
1190
1191 err = igb_alloc_queues(adapter);
1192 if (err) {
1193 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1194 goto err_alloc_queues;
1195 }
1196
1197 err = igb_map_ring_to_vector(adapter);
1198 if (err) {
1199 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1200 goto err_map_queues;
1201 }
1202
1203
1204 return 0;
1205err_map_queues:
1206 igb_free_queues(adapter);
1207err_alloc_queues:
1208 igb_free_q_vectors(adapter);
1209err_alloc_q_vectors:
1210 igb_reset_interrupt_capability(adapter);
1211 return err;
1212}
1213
1214/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001215 * igb_request_irq - initialize interrupts
1216 *
1217 * Attempts to configure interrupts using the best available
1218 * capabilities of the hardware and kernel.
1219 **/
1220static int igb_request_irq(struct igb_adapter *adapter)
1221{
1222 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001223 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001224 int err = 0;
1225
1226 if (adapter->msix_entries) {
1227 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001228 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001229 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001230 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001231 igb_clear_interrupt_scheme(adapter);
Alexander Duyckc74d5882011-08-26 07:46:45 +00001232 if (!pci_enable_msi(pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001233 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001234 igb_free_all_tx_resources(adapter);
1235 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001236 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001237 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001238 adapter->num_q_vectors = 1;
1239 err = igb_alloc_q_vectors(adapter);
1240 if (err) {
1241 dev_err(&pdev->dev,
1242 "Unable to allocate memory for vectors\n");
1243 goto request_done;
1244 }
1245 err = igb_alloc_queues(adapter);
1246 if (err) {
1247 dev_err(&pdev->dev,
1248 "Unable to allocate memory for queues\n");
1249 igb_free_q_vectors(adapter);
1250 goto request_done;
1251 }
1252 igb_setup_all_tx_resources(adapter);
1253 igb_setup_all_rx_resources(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001254 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001255
Alexander Duyckc74d5882011-08-26 07:46:45 +00001256 igb_assign_vector(adapter->q_vector[0], 0);
1257
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001258 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001259 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001260 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001261 if (!err)
1262 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001263
Auke Kok9d5c8242008-01-24 02:22:38 -08001264 /* fall back to legacy interrupts */
1265 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001266 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001267 }
1268
Alexander Duyckc74d5882011-08-26 07:46:45 +00001269 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001270 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001271
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001272 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001273 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001274 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001275
1276request_done:
1277 return err;
1278}
1279
1280static void igb_free_irq(struct igb_adapter *adapter)
1281{
Auke Kok9d5c8242008-01-24 02:22:38 -08001282 if (adapter->msix_entries) {
1283 int vector = 0, i;
1284
Alexander Duyck047e0032009-10-27 15:49:27 +00001285 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001286
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001287 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001288 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001289 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001290 } else {
1291 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001292 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001293}
1294
1295/**
1296 * igb_irq_disable - Mask off interrupt generation on the NIC
1297 * @adapter: board private structure
1298 **/
1299static void igb_irq_disable(struct igb_adapter *adapter)
1300{
1301 struct e1000_hw *hw = &adapter->hw;
1302
Alexander Duyck25568a52009-10-27 23:49:59 +00001303 /*
1304 * we need to be careful when disabling interrupts. The VFs are also
1305 * mapped into these registers, so clearing the bits can cause
1306 * issues for the VF drivers; we only need to clear what we set
1307 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001308 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001309 u32 regval = rd32(E1000_EIAM);
1310 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1311 wr32(E1000_EIMC, adapter->eims_enable_mask);
1312 regval = rd32(E1000_EIAC);
1313 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001314 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001315
1316 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001317 wr32(E1000_IMC, ~0);
1318 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001319 if (adapter->msix_entries) {
1320 int i;
1321 for (i = 0; i < adapter->num_q_vectors; i++)
1322 synchronize_irq(adapter->msix_entries[i].vector);
1323 } else {
1324 synchronize_irq(adapter->pdev->irq);
1325 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001326}
1327
1328/**
1329 * igb_irq_enable - Enable default interrupt generation settings
1330 * @adapter: board private structure
1331 **/
1332static void igb_irq_enable(struct igb_adapter *adapter)
1333{
1334 struct e1000_hw *hw = &adapter->hw;
1335
1336 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001337 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001338 u32 regval = rd32(E1000_EIAC);
1339 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1340 regval = rd32(E1000_EIAM);
1341 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001342 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001343 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001344 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001345 ims |= E1000_IMS_VMMB;
1346 }
1347 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001348 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001349 wr32(E1000_IMS, IMS_ENABLE_MASK |
1350 E1000_IMS_DRSTA);
1351 wr32(E1000_IAM, IMS_ENABLE_MASK |
1352 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001353 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001354}
1355
1356static void igb_update_mng_vlan(struct igb_adapter *adapter)
1357{
Alexander Duyck51466232009-10-27 23:47:35 +00001358 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001359 u16 vid = adapter->hw.mng_cookie.vlan_id;
1360 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001361
Alexander Duyck51466232009-10-27 23:47:35 +00001362 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1363 /* add VID to filter table */
1364 igb_vfta_set(hw, vid, true);
1365 adapter->mng_vlan_id = vid;
1366 } else {
1367 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1368 }
1369
1370 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1371 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001372 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001373 /* remove VID from filter table */
1374 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001375 }
1376}
1377
1378/**
1379 * igb_release_hw_control - release control of the h/w to f/w
1380 * @adapter: address of board private structure
1381 *
1382 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1383 * For ASF and Pass Through versions of f/w this means that the
1384 * driver is no longer loaded.
1385 *
1386 **/
1387static void igb_release_hw_control(struct igb_adapter *adapter)
1388{
1389 struct e1000_hw *hw = &adapter->hw;
1390 u32 ctrl_ext;
1391
1392 /* Let firmware take over control of h/w */
1393 ctrl_ext = rd32(E1000_CTRL_EXT);
1394 wr32(E1000_CTRL_EXT,
1395 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1396}
1397
Auke Kok9d5c8242008-01-24 02:22:38 -08001398/**
1399 * igb_get_hw_control - get control of the h/w from f/w
1400 * @adapter: address of board private structure
1401 *
1402 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1403 * For ASF and Pass Through versions of f/w this means that
1404 * the driver is loaded.
1405 *
1406 **/
1407static void igb_get_hw_control(struct igb_adapter *adapter)
1408{
1409 struct e1000_hw *hw = &adapter->hw;
1410 u32 ctrl_ext;
1411
1412 /* Let firmware know the driver has taken over */
1413 ctrl_ext = rd32(E1000_CTRL_EXT);
1414 wr32(E1000_CTRL_EXT,
1415 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1416}
1417
Auke Kok9d5c8242008-01-24 02:22:38 -08001418/**
1419 * igb_configure - configure the hardware for RX and TX
1420 * @adapter: private board structure
1421 **/
1422static void igb_configure(struct igb_adapter *adapter)
1423{
1424 struct net_device *netdev = adapter->netdev;
1425 int i;
1426
1427 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001428 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001429
1430 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001431
Alexander Duyck85b430b2009-10-27 15:50:29 +00001432 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001433 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001434 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001435
1436 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001437 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001438
1439 igb_rx_fifo_flush_82575(&adapter->hw);
1440
Alexander Duyckc493ea42009-03-20 00:16:50 +00001441 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001442 * at least 1 descriptor unused to make sure
1443 * next_to_use != next_to_clean */
1444 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001445 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001446 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001447 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001448}
1449
Nick Nunley88a268c2010-02-17 01:01:59 +00001450/**
1451 * igb_power_up_link - Power up the phy/serdes link
1452 * @adapter: address of board private structure
1453 **/
1454void igb_power_up_link(struct igb_adapter *adapter)
1455{
Akeem G. Abodunrin76886592012-07-17 04:51:18 +00001456 igb_reset_phy(&adapter->hw);
1457
Nick Nunley88a268c2010-02-17 01:01:59 +00001458 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1459 igb_power_up_phy_copper(&adapter->hw);
1460 else
1461 igb_power_up_serdes_link_82575(&adapter->hw);
1462}
1463
1464/**
1465 * igb_power_down_link - Power down the phy/serdes link
1466 * @adapter: address of board private structure
1467 */
1468static void igb_power_down_link(struct igb_adapter *adapter)
1469{
1470 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1471 igb_power_down_phy_copper_82575(&adapter->hw);
1472 else
1473 igb_shutdown_serdes_link_82575(&adapter->hw);
1474}
Auke Kok9d5c8242008-01-24 02:22:38 -08001475
1476/**
1477 * igb_up - Open the interface and prepare it to handle traffic
1478 * @adapter: board private structure
1479 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001480int igb_up(struct igb_adapter *adapter)
1481{
1482 struct e1000_hw *hw = &adapter->hw;
1483 int i;
1484
1485 /* hardware has been reset, we need to reload some things */
1486 igb_configure(adapter);
1487
1488 clear_bit(__IGB_DOWN, &adapter->state);
1489
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001490 for (i = 0; i < adapter->num_q_vectors; i++)
1491 napi_enable(&(adapter->q_vector[i]->napi));
1492
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001493 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001494 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001495 else
1496 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001497
1498 /* Clear any pending interrupts. */
1499 rd32(E1000_ICR);
1500 igb_irq_enable(adapter);
1501
Alexander Duyckd4960302009-10-27 15:53:45 +00001502 /* notify VFs that reset has been completed */
1503 if (adapter->vfs_allocated_count) {
1504 u32 reg_data = rd32(E1000_CTRL_EXT);
1505 reg_data |= E1000_CTRL_EXT_PFRSTD;
1506 wr32(E1000_CTRL_EXT, reg_data);
1507 }
1508
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001509 netif_tx_start_all_queues(adapter->netdev);
1510
Alexander Duyck25568a52009-10-27 23:49:59 +00001511 /* start the watchdog. */
1512 hw->mac.get_link_status = 1;
1513 schedule_work(&adapter->watchdog_task);
1514
Auke Kok9d5c8242008-01-24 02:22:38 -08001515 return 0;
1516}
1517
1518void igb_down(struct igb_adapter *adapter)
1519{
Auke Kok9d5c8242008-01-24 02:22:38 -08001520 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001521 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001522 u32 tctl, rctl;
1523 int i;
1524
1525 /* signal that we're down so the interrupt handler does not
1526 * reschedule our watchdog timer */
1527 set_bit(__IGB_DOWN, &adapter->state);
1528
1529 /* disable receives in the hardware */
1530 rctl = rd32(E1000_RCTL);
1531 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1532 /* flush and sleep below */
1533
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001534 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001535
1536 /* disable transmits in the hardware */
1537 tctl = rd32(E1000_TCTL);
1538 tctl &= ~E1000_TCTL_EN;
1539 wr32(E1000_TCTL, tctl);
1540 /* flush both disables and wait for them to finish */
1541 wrfl();
1542 msleep(10);
1543
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001544 for (i = 0; i < adapter->num_q_vectors; i++)
1545 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001546
Auke Kok9d5c8242008-01-24 02:22:38 -08001547 igb_irq_disable(adapter);
1548
1549 del_timer_sync(&adapter->watchdog_timer);
1550 del_timer_sync(&adapter->phy_info_timer);
1551
Auke Kok9d5c8242008-01-24 02:22:38 -08001552 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001553
1554 /* record the stats before reset*/
Eric Dumazet12dcd862010-10-15 17:27:10 +00001555 spin_lock(&adapter->stats64_lock);
1556 igb_update_stats(adapter, &adapter->stats64);
1557 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001558
Auke Kok9d5c8242008-01-24 02:22:38 -08001559 adapter->link_speed = 0;
1560 adapter->link_duplex = 0;
1561
Jeff Kirsher30236822008-06-24 17:01:15 -07001562 if (!pci_channel_offline(adapter->pdev))
1563 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001564 igb_clean_all_tx_rings(adapter);
1565 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001566#ifdef CONFIG_IGB_DCA
1567
1568 /* since we reset the hardware DCA settings were cleared */
1569 igb_setup_dca(adapter);
1570#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001571}
1572
1573void igb_reinit_locked(struct igb_adapter *adapter)
1574{
1575 WARN_ON(in_interrupt());
1576 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1577 msleep(1);
1578 igb_down(adapter);
1579 igb_up(adapter);
1580 clear_bit(__IGB_RESETTING, &adapter->state);
1581}
1582
1583void igb_reset(struct igb_adapter *adapter)
1584{
Alexander Duyck090b1792009-10-27 23:51:55 +00001585 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001586 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001587 struct e1000_mac_info *mac = &hw->mac;
1588 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001589 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1590 u16 hwm;
1591
1592 /* Repartition the PBA for an MTU greater than 9k.
1593 * To take effect, CTRL.RST is required.
1594 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001595 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001596 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001597 case e1000_82580:
1598 pba = rd32(E1000_RXPBS);
1599 pba = igb_rxpbs_adjust_82580(pba);
1600 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001601 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001602 pba = rd32(E1000_RXPBS);
1603 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001604 break;
1605 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001606 case e1000_i210:
1607 case e1000_i211:
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001608 default:
1609 pba = E1000_PBA_34K;
1610 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001611 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001612
Alexander Duyck2d064c02008-07-08 15:10:12 -07001613 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1614 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001615 /* adjust PBA for jumbo frames */
1616 wr32(E1000_PBA, pba);
1617
1618 /* To maintain wire speed transmits, the Tx FIFO should be
1619 * large enough to accommodate two full transmit packets,
1620 * rounded up to the next 1KB and expressed in KB. Likewise,
1621 * the Rx FIFO should be large enough to accommodate at least
1622 * one full receive packet and is similarly rounded up and
1623 * expressed in KB. */
1624 pba = rd32(E1000_PBA);
1625 /* upper 16 bits has Tx packet buffer allocation size in KB */
1626 tx_space = pba >> 16;
1627 /* lower 16 bits has Rx packet buffer allocation size in KB */
1628 pba &= 0xffff;
1629 /* the Tx FIFO also stores 16 bytes of information about the Tx
1630 * packet, but don't include the Ethernet FCS because hardware appends it */
1631 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001632 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001633 ETH_FCS_LEN) * 2;
1634 min_tx_space = ALIGN(min_tx_space, 1024);
1635 min_tx_space >>= 10;
1636 /* software strips receive CRC, so leave room for it */
1637 min_rx_space = adapter->max_frame_size;
1638 min_rx_space = ALIGN(min_rx_space, 1024);
1639 min_rx_space >>= 10;
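		/* Worked example, assuming 16-byte advanced Tx descriptors
		 * and a 9018-byte max frame: min_tx_space =
		 * ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18KB, and
		 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9KB.
		 */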
1640
1641 /* If current Tx allocation is less than the min Tx FIFO size,
1642 * and the min Tx FIFO size is less than the current Rx FIFO
1643 * allocation, take space away from current Rx allocation */
1644 if (tx_space < min_tx_space &&
1645 ((min_tx_space - tx_space) < pba)) {
1646 pba = pba - (min_tx_space - tx_space);
1647
1648 /* if short on rx space, rx wins and must trump tx
1649 * adjustment */
1650 if (pba < min_rx_space)
1651 pba = min_rx_space;
1652 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001653 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001654 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001655
1656 /* flow control settings */
1657 /* The high water mark must be low enough to fit one full frame
1658 * (or the size used for early receive) above it in the Rx FIFO.
1659 * Set it to the lower of:
1660 * - 90% of the Rx FIFO size, or
1661 * - the full Rx FIFO size minus one full frame */
1662 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001663 ((pba << 10) - 2 * adapter->max_frame_size));
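	/* For example, with the default 34KB packet buffer (pba = 34) and
	 * a 1522-byte max frame this is min(34816 * 9 / 10,
	 * 34816 - 2 * 1522) = min(31334, 31772) = 31334, which the 16-byte
	 * granularity below rounds down to a high water mark of 31328.
	 */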
Auke Kok9d5c8242008-01-24 02:22:38 -08001664
Alexander Duyckd405ea32009-12-23 13:21:27 +00001665 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1666 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001667 fc->pause_time = 0xFFFF;
1668 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001669 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001670
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001671 /* disable receive for all VFs and wait one second */
1672 if (adapter->vfs_allocated_count) {
1673 int i;
1674 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001675 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001676
1677 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001678 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001679
1680 /* disable transmits and receives */
1681 wr32(E1000_VFRE, 0);
1682 wr32(E1000_VFTE, 0);
1683 }
1684
Auke Kok9d5c8242008-01-24 02:22:38 -08001685 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001686 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001687 wr32(E1000_WUC, 0);
1688
Alexander Duyck330a6d62009-10-27 23:51:35 +00001689 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001690 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001691
Matthew Vicka27416b2012-04-18 02:57:44 +00001692 /*
1693 * Flow control settings reset on hardware reset, so guarantee flow
1694 * control is off when forcing speed.
1695 */
1696 if (!hw->mac.autoneg)
1697 igb_force_mac_fc(hw);
1698
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001699 igb_init_dmac(adapter, pba);
Nick Nunley88a268c2010-02-17 01:01:59 +00001700 if (!netif_running(adapter->netdev))
1701 igb_power_down_link(adapter);
1702
Auke Kok9d5c8242008-01-24 02:22:38 -08001703 igb_update_mng_vlan(adapter);
1704
1705 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1706 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1707
Matthew Vick1f6e8172012-08-18 07:26:33 +00001708#ifdef CONFIG_IGB_PTP
1709 /* Re-enable PTP, where applicable. */
1710 igb_ptp_reset(adapter);
1711#endif /* CONFIG_IGB_PTP */
1712
Alexander Duyck330a6d62009-10-27 23:51:35 +00001713 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001714}
1715
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001716static netdev_features_t igb_fix_features(struct net_device *netdev,
1717 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001718{
1719 /*
1720 * Since there is no support for separate rx/tx vlan accel
1721 * enable/disable make sure tx flag is always in same state as rx.
1722 */
1723 if (features & NETIF_F_HW_VLAN_RX)
1724 features |= NETIF_F_HW_VLAN_TX;
1725 else
1726 features &= ~NETIF_F_HW_VLAN_TX;
1727
1728 return features;
1729}
1730
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001731static int igb_set_features(struct net_device *netdev,
1732 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001733{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001734 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001735 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001736
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001737 if (changed & NETIF_F_HW_VLAN_RX)
1738 igb_vlan_mode(netdev, features);
1739
Ben Greear89eaefb2012-03-06 09:41:58 +00001740 if (!(changed & NETIF_F_RXALL))
1741 return 0;
1742
1743 netdev->features = features;
1744
1745 if (netif_running(netdev))
1746 igb_reinit_locked(adapter);
1747 else
1748 igb_reset(adapter);
1749
Michał Mirosławac52caa2011-06-08 08:38:01 +00001750 return 0;
1751}
1752
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001753static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001754 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001755 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001756 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001757 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001758 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001759 .ndo_set_mac_address = igb_set_mac,
1760 .ndo_change_mtu = igb_change_mtu,
1761 .ndo_do_ioctl = igb_ioctl,
1762 .ndo_tx_timeout = igb_tx_timeout,
1763 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001764 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1765 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001766 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1767 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1768 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1769 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001770#ifdef CONFIG_NET_POLL_CONTROLLER
1771 .ndo_poll_controller = igb_netpoll,
1772#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001773 .ndo_fix_features = igb_fix_features,
1774 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001775};
1776
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001777/**
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001778 * igb_set_fw_version - Configure version string for ethtool
1779 * @adapter: adapter struct
1780 *
1781 **/
1782void igb_set_fw_version(struct igb_adapter *adapter)
1783{
1784 struct e1000_hw *hw = &adapter->hw;
1785 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
1786 u16 major, build, patch, fw_version;
1787 u32 etrack_id;
1788
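	/* The version string built below takes one of three shapes (values
	 * here are hypothetical): "1.52, 0x800007d2, 2.1.3" when a valid
	 * combo image version is found, "1.52, 0x800007d2" when only the
	 * eTrack ID exists, and a bare "1.52" on i211 parts, whose NVM
	 * carries no eTrack or combo version words.
	 */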
1789 hw->nvm.ops.read(hw, 5, 1, &fw_version);
1790 if (adapter->hw.mac.type != e1000_i211) {
1791 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1792 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1793 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1794
1795 /* combo image version needs to be found */
1796 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1797 if ((comb_offset != 0x0) &&
1798 (comb_offset != IGB_NVM_VER_INVALID)) {
1799 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1800 + 1), 1, &comb_verh);
1801 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1802 1, &comb_verl);
1803
1804 /* Only display Option Rom if it exists and is valid */
1805 if ((comb_verh && comb_verl) &&
1806 ((comb_verh != IGB_NVM_VER_INVALID) &&
1807 (comb_verl != IGB_NVM_VER_INVALID))) {
1808 major = comb_verl >> IGB_COMB_VER_SHFT;
1809 build = (comb_verl << IGB_COMB_VER_SHFT) |
1810 (comb_verh >> IGB_COMB_VER_SHFT);
1811 patch = comb_verh & IGB_COMB_VER_MASK;
1812 snprintf(adapter->fw_version,
1813 sizeof(adapter->fw_version),
1814 "%d.%d%d, 0x%08x, %d.%d.%d",
1815 (fw_version & IGB_MAJOR_MASK) >>
1816 IGB_MAJOR_SHIFT,
1817 (fw_version & IGB_MINOR_MASK) >>
1818 IGB_MINOR_SHIFT,
1819 (fw_version & IGB_BUILD_MASK),
1820 etrack_id, major, build, patch);
1821 goto out;
1822 }
1823 }
1824 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1825 "%d.%d%d, 0x%08x",
1826 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1827 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1828 (fw_version & IGB_BUILD_MASK), etrack_id);
1829 } else {
1830 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1831 "%d.%d%d",
1832 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1833 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1834 (fw_version & IGB_BUILD_MASK));
1835 }
1836out:
1837 return;
1838}
1839
1840/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001841 * igb_probe - Device Initialization Routine
1842 * @pdev: PCI device information struct
1843 * @ent: entry in igb_pci_tbl
1844 *
1845 * Returns 0 on success, negative on failure
1846 *
1847 * igb_probe initializes an adapter identified by a pci_dev structure.
1848 * The OS initialization, configuring of the adapter private structure,
1849 * and a hardware reset occur.
1850 **/
1851static int __devinit igb_probe(struct pci_dev *pdev,
1852 const struct pci_device_id *ent)
1853{
1854 struct net_device *netdev;
1855 struct igb_adapter *adapter;
1856 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001857 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001858 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001859 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001860 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1861 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001862 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001863 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001864 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001865
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001866 /* Catch broken hardware that put the wrong VF device ID in
1867 * the PCIe SR-IOV capability.
1868 */
1869 if (pdev->is_virtfn) {
1870 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001871 pci_name(pdev), pdev->vendor, pdev->device);
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001872 return -EINVAL;
1873 }
1874
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001875 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001876 if (err)
1877 return err;
1878
1879 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001880 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001881 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001882 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001883 if (!err)
1884 pci_using_dac = 1;
1885 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001886 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001887 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001888 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001889 if (err) {
1890 dev_err(&pdev->dev, "No usable DMA "
1891 "configuration, aborting\n");
1892 goto err_dma;
1893 }
1894 }
1895 }
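	/* pci_using_dac records whether the full 64-bit DMA masks were
	 * accepted; it later gates NETIF_F_HIGHDMA so the stack may hand
	 * the driver buffers located above 4GB.
	 */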
1896
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001897 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1898 IORESOURCE_MEM),
1899 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001900 if (err)
1901 goto err_pci_reg;
1902
Frans Pop19d5afd2009-10-02 10:04:12 -07001903 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001904
Auke Kok9d5c8242008-01-24 02:22:38 -08001905 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001906 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001907
1908 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001909 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001910 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001911 if (!netdev)
1912 goto err_alloc_etherdev;
1913
1914 SET_NETDEV_DEV(netdev, &pdev->dev);
1915
1916 pci_set_drvdata(pdev, netdev);
1917 adapter = netdev_priv(netdev);
1918 adapter->netdev = netdev;
1919 adapter->pdev = pdev;
1920 hw = &adapter->hw;
1921 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00001922 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9d5c8242008-01-24 02:22:38 -08001923
1924 mmio_start = pci_resource_start(pdev, 0);
1925 mmio_len = pci_resource_len(pdev, 0);
1926
1927 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001928 hw->hw_addr = ioremap(mmio_start, mmio_len);
1929 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001930 goto err_ioremap;
1931
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001932 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001933 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001934 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001935
1936 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1937
1938 netdev->mem_start = mmio_start;
1939 netdev->mem_end = mmio_start + mmio_len;
1940
Auke Kok9d5c8242008-01-24 02:22:38 -08001941 /* PCI config space info */
1942 hw->vendor_id = pdev->vendor;
1943 hw->device_id = pdev->device;
1944 hw->revision_id = pdev->revision;
1945 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1946 hw->subsystem_device_id = pdev->subsystem_device;
1947
Auke Kok9d5c8242008-01-24 02:22:38 -08001948 /* Copy the default MAC, PHY and NVM function pointers */
1949 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1950 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1951 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1952 /* Initialize skew-specific constants */
1953 err = ei->get_invariants(hw);
1954 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001955 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001956
Alexander Duyck450c87c2009-02-06 23:22:11 +00001957 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001958 err = igb_sw_init(adapter);
1959 if (err)
1960 goto err_sw_init;
1961
1962 igb_get_bus_info_pcie(hw);
1963
1964 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001965
1966 /* Copper options */
1967 if (hw->phy.media_type == e1000_media_type_copper) {
1968 hw->phy.mdix = AUTO_ALL_MODES;
1969 hw->phy.disable_polarity_correction = false;
1970 hw->phy.ms_type = e1000_ms_hw_default;
1971 }
1972
1973 if (igb_check_reset_block(hw))
1974 dev_info(&pdev->dev,
1975 "PHY reset is blocked due to SOL/IDER session.\n");
1976
Alexander Duyck077887c2011-08-26 07:46:29 +00001977 /*
1978 * features is initialized to 0 in allocation, it might have bits
1979 * set by igb_sw_init so we should use an or instead of an
1980 * assignment.
1981 */
1982 netdev->features |= NETIF_F_SG |
1983 NETIF_F_IP_CSUM |
1984 NETIF_F_IPV6_CSUM |
1985 NETIF_F_TSO |
1986 NETIF_F_TSO6 |
1987 NETIF_F_RXHASH |
1988 NETIF_F_RXCSUM |
1989 NETIF_F_HW_VLAN_RX |
1990 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001991
Alexander Duyck077887c2011-08-26 07:46:29 +00001992 /* copy netdev features into list of user selectable features */
1993 netdev->hw_features |= netdev->features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001994 netdev->hw_features |= NETIF_F_RXALL;
Auke Kok9d5c8242008-01-24 02:22:38 -08001995
Alexander Duyck077887c2011-08-26 07:46:29 +00001996 /* set this bit last since it cannot be part of hw_features */
1997 netdev->features |= NETIF_F_HW_VLAN_FILTER;
1998
1999 netdev->vlan_features |= NETIF_F_TSO |
2000 NETIF_F_TSO6 |
2001 NETIF_F_IP_CSUM |
2002 NETIF_F_IPV6_CSUM |
2003 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002004
Ben Greear6b8f0922012-03-06 09:41:53 +00002005 netdev->priv_flags |= IFF_SUPP_NOFCS;
2006
Yi Zou7b872a52010-09-22 17:57:58 +00002007 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002008 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00002009 netdev->vlan_features |= NETIF_F_HIGHDMA;
2010 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002011
Michał Mirosławac52caa2011-06-08 08:38:01 +00002012 if (hw->mac.type >= e1000_82576) {
2013 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002014 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002015 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002016
Jiri Pirko01789342011-08-16 06:29:00 +00002017 netdev->priv_flags |= IFF_UNICAST_FLT;
2018
Alexander Duyck330a6d62009-10-27 23:51:35 +00002019 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002020
2021 /* before reading the NVM, reset the controller to put the device in a
2022 * known good starting state */
2023 hw->mac.ops.reset_hw(hw);
2024
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002025 /*
2026 * make sure the NVM is good; i211 parts have special NVM that
2027 * doesn't contain a checksum
2028 */
2029 if (hw->mac.type != e1000_i211) {
2030 if (hw->nvm.ops.validate(hw) < 0) {
2031 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2032 err = -EIO;
2033 goto err_eeprom;
2034 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002035 }
2036
2037 /* copy the MAC address out of the NVM */
2038 if (hw->mac.ops.read_mac_addr(hw))
2039 dev_err(&pdev->dev, "NVM Read Error\n");
2040
2041 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2042 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2043
2044 if (!is_valid_ether_addr(netdev->perm_addr)) {
2045 dev_err(&pdev->dev, "Invalid MAC Address\n");
2046 err = -EIO;
2047 goto err_eeprom;
2048 }
2049
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002050 /* get firmware version for ethtool -i */
2051 igb_set_fw_version(adapter);
2052
Joe Perchesc061b182010-08-23 18:20:03 +00002053 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002054 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002055 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002056 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002057
2058 INIT_WORK(&adapter->reset_task, igb_reset_task);
2059 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2060
Alexander Duyck450c87c2009-02-06 23:22:11 +00002061 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002062 adapter->fc_autoneg = true;
2063 hw->mac.autoneg = true;
2064 hw->phy.autoneg_advertised = 0x2f;
2065
Alexander Duyck0cce1192009-07-23 18:10:24 +00002066 hw->fc.requested_mode = e1000_fc_default;
2067 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002068
Auke Kok9d5c8242008-01-24 02:22:38 -08002069 igb_validate_mdi_setting(hw);
2070
Auke Kok9d5c8242008-01-24 02:22:38 -08002071 /* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
2072 * enable the ACPI Magic Packet filter
2073 */
2074
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002075 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002076 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002077 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002078 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2079 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2080 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002081 else if (hw->bus.func == 1)
2082 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002083
2084 if (eeprom_data & eeprom_apme_mask)
2085 adapter->eeprom_wol |= E1000_WUFC_MAG;
2086
2087 /* now that we have the eeprom settings, apply the special cases where
2088 * the EEPROM may be wrong or the board simply won't support Wake on
2089 * LAN on a particular port */
2090 switch (pdev->device) {
2091 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2092 adapter->eeprom_wol = 0;
2093 break;
2094 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002095 case E1000_DEV_ID_82576_FIBER:
2096 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002097 /* Wake events only supported on port A for dual fiber
2098 * regardless of eeprom setting */
2099 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2100 adapter->eeprom_wol = 0;
2101 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002102 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002103 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002104 /* if quad port adapter, disable WoL on all but port A */
2105 if (global_quad_port_a != 0)
2106 adapter->eeprom_wol = 0;
2107 else
2108 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2109 /* Reset for multiple quad port adapters */
2110 if (++global_quad_port_a == 4)
2111 global_quad_port_a = 0;
2112 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002113 }
2114
2115 /* initialize the wol settings based on the eeprom settings */
2116 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002117 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002118
2119 /* reset the hardware with the new settings */
2120 igb_reset(adapter);
2121
2122 /* let the f/w know that the h/w is now under the control of the
2123 * driver. */
2124 igb_get_hw_control(adapter);
2125
Auke Kok9d5c8242008-01-24 02:22:38 -08002126 strcpy(netdev->name, "eth%d");
2127 err = register_netdev(netdev);
2128 if (err)
2129 goto err_register;
2130
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002131 /* carrier off reporting is important to ethtool even BEFORE open */
2132 netif_carrier_off(netdev);
2133
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002134#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002135 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002136 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002137 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002138 igb_setup_dca(adapter);
2139 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002140
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002141#endif
Matthew Vick3c89f6d2012-08-10 05:40:43 +00002142
Richard Cochran7ebae812012-03-16 10:55:37 +00002143#ifdef CONFIG_IGB_PTP
Anders Berggren673b8b72011-02-04 07:32:32 +00002144 /* do hw tstamp init after resetting */
Richard Cochran7ebae812012-03-16 10:55:37 +00002145 igb_ptp_init(adapter);
Matthew Vick3c89f6d2012-08-10 05:40:43 +00002146#endif /* CONFIG_IGB_PTP */
Anders Berggren673b8b72011-02-04 07:32:32 +00002147
Auke Kok9d5c8242008-01-24 02:22:38 -08002148 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2149 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002150 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002151 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002152 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002153 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002154 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002155 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2156 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2157 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2158 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002159 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002160
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002161 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2162 if (ret_val)
2163 strcpy(part_str, "Unknown");
2164 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002165 dev_info(&pdev->dev,
2166 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2167 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002168 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002169 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002170 switch (hw->mac.type) {
2171 case e1000_i350:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002172 case e1000_i210:
2173 case e1000_i211:
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002174 igb_set_eee_i350(hw);
2175 break;
2176 default:
2177 break;
2178 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002179
2180 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002181 return 0;
2182
2183err_register:
2184 igb_release_hw_control(adapter);
2185err_eeprom:
2186 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002187 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002188
2189 if (hw->flash_address)
2190 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002191err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002192 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002193 iounmap(hw->hw_addr);
2194err_ioremap:
2195 free_netdev(netdev);
2196err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002197 pci_release_selected_regions(pdev,
2198 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002199err_pci_reg:
2200err_dma:
2201 pci_disable_device(pdev);
2202 return err;
2203}
2204
2205/**
2206 * igb_remove - Device Removal Routine
2207 * @pdev: PCI device information struct
2208 *
2209 * igb_remove is called by the PCI subsystem to alert the driver
2210 * that it should release a PCI device. This could be caused by a
2211 * Hot-Plug event, or because the driver is going to be removed from
2212 * memory.
2213 **/
2214static void __devexit igb_remove(struct pci_dev *pdev)
2215{
2216 struct net_device *netdev = pci_get_drvdata(pdev);
2217 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002218 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002219
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002220 pm_runtime_get_noresume(&pdev->dev);
Richard Cochran7ebae812012-03-16 10:55:37 +00002221#ifdef CONFIG_IGB_PTP
Matthew Vicka79f4f82012-08-10 05:40:44 +00002222 igb_ptp_stop(adapter);
Matthew Vick3c89f6d2012-08-10 05:40:43 +00002223#endif /* CONFIG_IGB_PTP */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002224
Tejun Heo760141a2010-12-12 16:45:14 +01002225 /*
2226 * The watchdog timer may be rescheduled, so explicitly
2227 * disable watchdog from being rescheduled.
2228 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002229 set_bit(__IGB_DOWN, &adapter->state);
2230 del_timer_sync(&adapter->watchdog_timer);
2231 del_timer_sync(&adapter->phy_info_timer);
2232
Tejun Heo760141a2010-12-12 16:45:14 +01002233 cancel_work_sync(&adapter->reset_task);
2234 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002235
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002236#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002237 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002238 dev_info(&pdev->dev, "DCA disabled\n");
2239 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002240 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002241 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002242 }
2243#endif
2244
Auke Kok9d5c8242008-01-24 02:22:38 -08002245 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2246 * would have already happened in close and is redundant. */
2247 igb_release_hw_control(adapter);
2248
2249 unregister_netdev(netdev);
2250
Alexander Duyck047e0032009-10-27 15:49:27 +00002251 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002252
Alexander Duyck37680112009-02-19 20:40:30 -08002253#ifdef CONFIG_PCI_IOV
2254 /* reclaim resources allocated to VFs */
2255 if (adapter->vf_data) {
2256 /* disable iov and allow time for transactions to clear */
Stefan Assmannf5571472012-08-18 04:06:11 +00002257 if (igb_vfs_are_assigned(adapter)) {
2258 dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2259 } else {
Greg Rose0224d662011-10-14 02:57:14 +00002260 pci_disable_sriov(pdev);
2261 msleep(500);
Greg Rose0224d662011-10-14 02:57:14 +00002262 }
Alexander Duyck37680112009-02-19 20:40:30 -08002263
2264 kfree(adapter->vf_data);
2265 adapter->vf_data = NULL;
2266 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002267 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002268 msleep(100);
2269 dev_info(&pdev->dev, "IOV Disabled\n");
2270 }
2271#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002272
Alexander Duyck28b07592009-02-06 23:20:31 +00002273 iounmap(hw->hw_addr);
2274 if (hw->flash_address)
2275 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002276 pci_release_selected_regions(pdev,
2277 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002278
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002279 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002280 free_netdev(netdev);
2281
Frans Pop19d5afd2009-10-02 10:04:12 -07002282 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002283
Auke Kok9d5c8242008-01-24 02:22:38 -08002284 pci_disable_device(pdev);
2285}
2286
2287/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002288 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2289 * @adapter: board private structure to initialize
2290 *
2291 * This function initializes the vf specific data storage and then attempts to
2292 * allocate the VFs. It is ordered this way because it is much
2293 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2294 * the memory for the VFs.
2295 **/
2296static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2297{
2298#ifdef CONFIG_PCI_IOV
2299 struct pci_dev *pdev = adapter->pdev;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002300 struct e1000_hw *hw = &adapter->hw;
Stefan Assmannf5571472012-08-18 04:06:11 +00002301 int old_vfs = pci_num_vf(adapter->pdev);
Greg Rose0224d662011-10-14 02:57:14 +00002302 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002303
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002304 /* Virtualization features not supported on i210 family. */
2305 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2306 return;
2307
Greg Rose0224d662011-10-14 02:57:14 +00002308 if (old_vfs) {
2309 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2310 "max_vfs setting of %d\n", old_vfs, max_vfs);
2311 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002312 }
2313
Greg Rose0224d662011-10-14 02:57:14 +00002314 if (!adapter->vfs_allocated_count)
2315 return;
2316
2317 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2318 sizeof(struct vf_data_storage), GFP_KERNEL);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002319
Greg Rose0224d662011-10-14 02:57:14 +00002320 /* if allocation failed then we do not support SR-IOV */
2321 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002322 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002323 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2324 "Data Storage\n");
2325 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002326 }
Greg Rose0224d662011-10-14 02:57:14 +00002327
2328 if (!old_vfs) {
2329 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2330 goto err_out;
2331 }
2332 dev_info(&pdev->dev, "%d VFs allocated\n",
2333 adapter->vfs_allocated_count);
2334 for (i = 0; i < adapter->vfs_allocated_count; i++)
2335 igb_vf_configure(adapter, i);
2336
2337 /* DMA Coalescing is not supported in IOV mode. */
2338 adapter->flags &= ~IGB_FLAG_DMAC;
2339 goto out;
2340err_out:
2341 kfree(adapter->vf_data);
2342 adapter->vf_data = NULL;
2343 adapter->vfs_allocated_count = 0;
2344out:
2345 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002346#endif /* CONFIG_PCI_IOV */
2347}
2348
Alexander Duyck115f4592009-11-12 18:37:00 +00002349/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002350 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2351 * @adapter: board private structure to initialize
2352 *
2353 * igb_sw_init initializes the Adapter private data structure.
2354 * Fields are initialized based on PCI device information and
2355 * OS network device settings (MTU size).
2356 **/
2357static int __devinit igb_sw_init(struct igb_adapter *adapter)
2358{
2359 struct e1000_hw *hw = &adapter->hw;
2360 struct net_device *netdev = adapter->netdev;
2361 struct pci_dev *pdev = adapter->pdev;
Matthew Vick374a5422012-05-18 04:54:58 +00002362 u32 max_rss_queues;
Auke Kok9d5c8242008-01-24 02:22:38 -08002363
2364 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2365
Alexander Duyck13fde972011-10-05 13:35:24 +00002366 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002367 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2368 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002369
2370 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002371 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2372 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2373
Alexander Duyck13fde972011-10-05 13:35:24 +00002374 /* set default work limits */
2375 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2376
Alexander Duyck153285f2011-08-26 07:43:32 +00002377 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2378 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002379 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
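	/* e.g. for the default 1500-byte MTU: 1500 + 14 (Ethernet header) +
	 * 4 (FCS) + 4 (VLAN tag) = 1522 bytes; min_frame_size is the
	 * 60-byte Ethernet minimum plus 4 bytes of FCS = 64 bytes.
	 */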
2380
Eric Dumazet12dcd862010-10-15 17:27:10 +00002381 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002382#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002383 switch (hw->mac.type) {
2384 case e1000_82576:
2385 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002386 if (max_vfs > 7) {
2387 dev_warn(&pdev->dev,
2388 "Maximum of 7 VFs per PF, using max\n");
2389 adapter->vfs_allocated_count = 7;
2390 } else
2391 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002392 break;
2393 default:
2394 break;
2395 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002396#endif /* CONFIG_PCI_IOV */
Matthew Vick374a5422012-05-18 04:54:58 +00002397
2398 /* Determine the maximum number of RSS queues supported. */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002399 switch (hw->mac.type) {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002400 case e1000_i211:
Matthew Vick374a5422012-05-18 04:54:58 +00002401 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002402 break;
Matthew Vick374a5422012-05-18 04:54:58 +00002403 case e1000_82575:
2404 case e1000_i210:
2405 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2406 break;
2407 case e1000_i350:
2408 /* I350 cannot do RSS and SR-IOV at the same time */
2409 if (!!adapter->vfs_allocated_count) {
2410 max_rss_queues = 1;
2411 break;
2412 }
2413 /* fall through */
2414 case e1000_82576:
2415 if (!!adapter->vfs_allocated_count) {
2416 max_rss_queues = 2;
2417 break;
2418 }
2419 /* fall through */
2420 case e1000_82580:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002421 default:
Matthew Vick374a5422012-05-18 04:54:58 +00002422 max_rss_queues = IGB_MAX_RX_QUEUES;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002423 break;
2424 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002425
Matthew Vick374a5422012-05-18 04:54:58 +00002426 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2427
2428 /* Determine if we need to pair queues. */
2429 switch (hw->mac.type) {
2430 case e1000_82575:
2431 case e1000_i211:
2432 /* Device supports enough interrupts without queue pairing. */
2433 break;
2434 case e1000_82576:
2435 /*
2436 * If VFs are going to be allocated with RSS queues then we
2437 * should pair the queues in order to conserve interrupts due
2438 * to limited supply.
2439 */
2440 if ((adapter->rss_queues > 1) &&
2441 (adapter->vfs_allocated_count > 6))
2442 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2443 /* fall through */
2444 case e1000_82580:
2445 case e1000_i350:
2446 case e1000_i210:
2447 default:
2448 /*
2449 * If rss_queues > half of max_rss_queues, pair the queues in
2450 * order to conserve interrupts due to limited supply.
2451 */
2452 if (adapter->rss_queues > (max_rss_queues / 2))
2453 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2454 break;
2455 }
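	/* Example: assuming IGB_MAX_RX_QUEUES is 8, an 82580 configured for
	 * more than 4 RSS queues sets IGB_FLAG_QUEUE_PAIRS so each Tx/Rx
	 * ring pair shares one MSI-X vector, halving the vectors consumed.
	 */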
Alexander Duycka99955f2009-11-12 18:37:19 +00002456
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002457 /* Setup and initialize a copy of the hw vlan table array */
2458 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2459 E1000_VLAN_FILTER_TBL_SIZE,
2460 GFP_ATOMIC);
2461
Alexander Duycka6b623e2009-10-27 23:47:53 +00002462 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002463 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002464 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2465 return -ENOMEM;
2466 }
2467
Alexander Duycka6b623e2009-10-27 23:47:53 +00002468 igb_probe_vfs(adapter);
2469
Auke Kok9d5c8242008-01-24 02:22:38 -08002470 /* Explicitly disable IRQ since the NIC can be in any state. */
2471 igb_irq_disable(adapter);
2472
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002473 if (hw->mac.type >= e1000_i350)
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002474 adapter->flags &= ~IGB_FLAG_DMAC;
2475
Auke Kok9d5c8242008-01-24 02:22:38 -08002476 set_bit(__IGB_DOWN, &adapter->state);
2477 return 0;
2478}
2479
2480/**
2481 * igb_open - Called when a network interface is made active
2482 * @netdev: network interface device structure
2483 *
2484 * Returns 0 on success, negative value on failure
2485 *
2486 * The open entry point is called when a network interface is made
2487 * active by the system (IFF_UP). At this point all resources needed
2488 * for transmit and receive operations are allocated, the interrupt
2489 * handler is registered with the OS, the watchdog timer is started,
2490 * and the stack is notified that the interface is ready.
2491 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002492static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08002493{
2494 struct igb_adapter *adapter = netdev_priv(netdev);
2495 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002496 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002497 int err;
2498 int i;
2499
2500 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002501 if (test_bit(__IGB_TESTING, &adapter->state)) {
2502 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08002503 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002504 }
2505
2506 if (!resuming)
2507 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002508
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002509 netif_carrier_off(netdev);
2510
Auke Kok9d5c8242008-01-24 02:22:38 -08002511 /* allocate transmit descriptors */
2512 err = igb_setup_all_tx_resources(adapter);
2513 if (err)
2514 goto err_setup_tx;
2515
2516 /* allocate receive descriptors */
2517 err = igb_setup_all_rx_resources(adapter);
2518 if (err)
2519 goto err_setup_rx;
2520
Nick Nunley88a268c2010-02-17 01:01:59 +00002521 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002522
Auke Kok9d5c8242008-01-24 02:22:38 -08002523 /* before we allocate an interrupt, we must be ready to handle it.
2524 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2525	 * as soon as we call request_irq, so we have to set up our
2526 * clean_rx handler before we do so. */
2527 igb_configure(adapter);
2528
2529 err = igb_request_irq(adapter);
2530 if (err)
2531 goto err_req_irq;
2532
2533 /* From here on the code is the same as igb_up() */
2534 clear_bit(__IGB_DOWN, &adapter->state);
2535
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002536 for (i = 0; i < adapter->num_q_vectors; i++)
2537 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002538
2539 /* Clear any pending interrupts. */
2540 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002541
2542 igb_irq_enable(adapter);
2543
Alexander Duyckd4960302009-10-27 15:53:45 +00002544 /* notify VFs that reset has been completed */
2545 if (adapter->vfs_allocated_count) {
2546 u32 reg_data = rd32(E1000_CTRL_EXT);
2547 reg_data |= E1000_CTRL_EXT_PFRSTD;
2548 wr32(E1000_CTRL_EXT, reg_data);
2549 }
2550
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002551 netif_tx_start_all_queues(netdev);
2552
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002553 if (!resuming)
2554 pm_runtime_put(&pdev->dev);
2555
Alexander Duyck25568a52009-10-27 23:49:59 +00002556 /* start the watchdog. */
2557 hw->mac.get_link_status = 1;
2558 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002559
2560 return 0;
2561
2562err_req_irq:
2563 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002564 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002565 igb_free_all_rx_resources(adapter);
2566err_setup_rx:
2567 igb_free_all_tx_resources(adapter);
2568err_setup_tx:
2569 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002570 if (!resuming)
2571 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002572
2573 return err;
2574}
2575
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002576static int igb_open(struct net_device *netdev)
2577{
2578 return __igb_open(netdev, false);
2579}
2580
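/*
 * A sketch of how the resuming/suspending flags are expected to be
 * used: the PM resume path can reuse the same bring-up code without
 * taking the runtime-PM reference that a normal ndo_open must hold,
 * e.g.
 *
 *	if (netif_running(netdev))
 *		err = __igb_open(netdev, true);
 *
 * (illustrative only; __igb_close(netdev, true) mirrors this on the
 * suspend side)
 */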
Auke Kok9d5c8242008-01-24 02:22:38 -08002581/**
2582 * igb_close - Disables a network interface
2583 * @netdev: network interface device structure
2584 *
2585 * Returns 0; this is not allowed to fail
2586 *
2587 * The close entry point is called when an interface is de-activated
2588 * by the OS. The hardware is still under the driver's control, but
2589 * needs to be disabled. A global MAC reset is issued to stop the
2590 * hardware, and all transmit and receive resources are freed.
2591 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002592static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08002593{
2594 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002595 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002596
2597 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08002598
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002599 if (!suspending)
2600 pm_runtime_get_sync(&pdev->dev);
2601
2602 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002603 igb_free_irq(adapter);
2604
2605 igb_free_all_tx_resources(adapter);
2606 igb_free_all_rx_resources(adapter);
2607
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002608 if (!suspending)
2609 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002610 return 0;
2611}
2612
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002613static int igb_close(struct net_device *netdev)
2614{
2615 return __igb_close(netdev, false);
2616}
2617
Auke Kok9d5c8242008-01-24 02:22:38 -08002618/**
2619 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002620 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2621 *
2622 * Return 0 on success, negative on failure
2623 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002624int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002625{
Alexander Duyck59d71982010-04-27 13:09:25 +00002626 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002627 int size;
2628
Alexander Duyck06034642011-08-26 07:44:22 +00002629 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002630
2631 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002632 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002633 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002634
2635 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002636 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002637 tx_ring->size = ALIGN(tx_ring->size, 4096);
2638
Alexander Duyck59d71982010-04-27 13:09:25 +00002639 tx_ring->desc = dma_alloc_coherent(dev,
2640 tx_ring->size,
2641 &tx_ring->dma,
2642 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002643 if (!tx_ring->desc)
2644 goto err;
2645
Auke Kok9d5c8242008-01-24 02:22:38 -08002646 tx_ring->next_to_use = 0;
2647 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002648
Auke Kok9d5c8242008-01-24 02:22:38 -08002649 return 0;
2650
2651err:
Alexander Duyck06034642011-08-26 07:44:22 +00002652 vfree(tx_ring->tx_buffer_info);
Alexander Duyckf33005a2012-09-13 06:27:55 +00002653 tx_ring->tx_buffer_info = NULL;
2654 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002655 return -ENOMEM;
2656}
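/*
 * Worked example of the sizing above, assuming the default descriptor
 * count: with tx_ring->count = 256 and a 16-byte advanced Tx
 * descriptor, tx_ring->size = 256 * 16 = 4096 bytes, and
 * ALIGN(4096, 4096) leaves that unchanged, so the ring occupies
 * exactly one 4K page of coherent DMA memory.
 */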
2657
2658/**
2659 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2660 * (Descriptors) for all queues
2661 * @adapter: board private structure
2662 *
2663 * Return 0 on success, negative on failure
2664 **/
2665static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2666{
Alexander Duyck439705e2009-10-27 23:49:20 +00002667 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002668 int i, err = 0;
2669
2670 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002671 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002672 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002673 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002674 "Allocation for Tx Queue %u failed\n", i);
2675 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002676 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002677 break;
2678 }
2679 }
2680
2681 return err;
2682}
2683
2684/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002685 * igb_setup_tctl - configure the transmit control registers
2686 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002687 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002688void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002689{
Auke Kok9d5c8242008-01-24 02:22:38 -08002690 struct e1000_hw *hw = &adapter->hw;
2691 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002692
Alexander Duyck85b430b2009-10-27 15:50:29 +00002693 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2694 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002695
2696 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002697 tctl = rd32(E1000_TCTL);
2698 tctl &= ~E1000_TCTL_CT;
2699 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2700 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2701
2702 igb_config_collision_dist(hw);
2703
Auke Kok9d5c8242008-01-24 02:22:38 -08002704 /* Enable transmits */
2705 tctl |= E1000_TCTL_EN;
2706
2707 wr32(E1000_TCTL, tctl);
2708}
2709
2710/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002711 * igb_configure_tx_ring - Configure transmit ring after Reset
2712 * @adapter: board private structure
2713 * @ring: tx ring to configure
2714 *
2715 * Configure a transmit ring after a reset.
2716 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002717void igb_configure_tx_ring(struct igb_adapter *adapter,
2718 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002719{
2720 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002721 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002722 u64 tdba = ring->dma;
2723 int reg_idx = ring->reg_idx;
2724
2725 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002726 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002727 wrfl();
2728 mdelay(10);
2729
2730 wr32(E1000_TDLEN(reg_idx),
2731 ring->count * sizeof(union e1000_adv_tx_desc));
2732 wr32(E1000_TDBAL(reg_idx),
2733 tdba & 0x00000000ffffffffULL);
2734 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2735
Alexander Duyckfce99e32009-10-27 15:51:27 +00002736 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002737 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002738 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002739
2740 txdctl |= IGB_TX_PTHRESH;
2741 txdctl |= IGB_TX_HTHRESH << 8;
2742 txdctl |= IGB_TX_WTHRESH << 16;
2743
2744 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2745 wr32(E1000_TXDCTL(reg_idx), txdctl);
2746}
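/*
 * The three thresholds above land in separate bit lanes of TXDCTL:
 * PTHRESH in bits 5:0, HTHRESH in bits 13:8 and WTHRESH in bits 21:16.
 * As an illustrative example (the defaults can differ per MAC type),
 * PTHRESH = 8, HTHRESH = 1 and WTHRESH = 16 give
 * txdctl = 0x8 | (0x1 << 8) | (0x10 << 16) = 0x00100108 before
 * E1000_TXDCTL_QUEUE_ENABLE is OR'd in.
 */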
2747
2748/**
2749 * igb_configure_tx - Configure transmit Unit after Reset
2750 * @adapter: board private structure
2751 *
2752 * Configure the Tx unit of the MAC after a reset.
2753 **/
2754static void igb_configure_tx(struct igb_adapter *adapter)
2755{
2756 int i;
2757
2758 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002759 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002760}
2761
2762/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002763 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002764 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2765 *
2766 * Returns 0 on success, negative on failure
2767 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002768int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002769{
Alexander Duyck59d71982010-04-27 13:09:25 +00002770 struct device *dev = rx_ring->dev;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002771 int size;
Auke Kok9d5c8242008-01-24 02:22:38 -08002772
Alexander Duyck06034642011-08-26 07:44:22 +00002773 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002774
2775 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002776 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002777 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002778
2780 /* Round up to nearest 4K */
Alexander Duyckf33005a2012-09-13 06:27:55 +00002781 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002782 rx_ring->size = ALIGN(rx_ring->size, 4096);
2783
Alexander Duyck59d71982010-04-27 13:09:25 +00002784 rx_ring->desc = dma_alloc_coherent(dev,
2785 rx_ring->size,
2786 &rx_ring->dma,
2787 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002788 if (!rx_ring->desc)
2789 goto err;
2790
2791 rx_ring->next_to_clean = 0;
2792 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002793
Auke Kok9d5c8242008-01-24 02:22:38 -08002794 return 0;
2795
2796err:
Alexander Duyck06034642011-08-26 07:44:22 +00002797 vfree(rx_ring->rx_buffer_info);
2798 rx_ring->rx_buffer_info = NULL;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002799 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002800 return -ENOMEM;
2801}
2802
2803/**
2804 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2805 * (Descriptors) for all queues
2806 * @adapter: board private structure
2807 *
2808 * Return 0 on success, negative on failure
2809 **/
2810static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2811{
Alexander Duyck439705e2009-10-27 23:49:20 +00002812 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002813 int i, err = 0;
2814
2815 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002816 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002817 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002818 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002819 "Allocation for Rx Queue %u failed\n", i);
2820 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002821 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002822 break;
2823 }
2824 }
2825
2826 return err;
2827}
2828
2829/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002830 * igb_setup_mrqc - configure the multiple receive queue control registers
2831 * @adapter: Board private structure
2832 **/
2833static void igb_setup_mrqc(struct igb_adapter *adapter)
2834{
2835 struct e1000_hw *hw = &adapter->hw;
2836 u32 mrqc, rxcsum;
2837 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2838 union e1000_reta {
2839 u32 dword;
2840 u8 bytes[4];
2841 } reta;
2842 static const u8 rsshash[40] = {
2843 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2844 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2845 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2846 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2847
2848 /* Fill out hash function seeds */
2849 for (j = 0; j < 10; j++) {
2850 u32 rsskey = rsshash[(j * 4)];
2851 rsskey |= rsshash[(j * 4) + 1] << 8;
2852 rsskey |= rsshash[(j * 4) + 2] << 16;
2853 rsskey |= rsshash[(j * 4) + 3] << 24;
2854 array_wr32(E1000_RSSRK(0), j, rsskey);
2855 }
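	/*
	 * e.g. on the first pass (j = 0) the key bytes 6d 5a 56 da are
	 * packed little-endian into rsskey = 0xda565a6d and written to
	 * RSSRK(0).
	 */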
2856
Alexander Duycka99955f2009-11-12 18:37:19 +00002857 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002858
2859 if (adapter->vfs_allocated_count) {
2860	/* 82575 and 82576 support 2 RSS queues for VMDq */
2861 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002862 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002863 case e1000_82580:
2864 num_rx_queues = 1;
2865 shift = 0;
2866 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002867 case e1000_82576:
2868 shift = 3;
2869 num_rx_queues = 2;
2870 break;
2871 case e1000_82575:
2872 shift = 2;
2873 shift2 = 6;
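	/* fall through */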
2874 default:
2875 break;
2876 }
2877 } else {
2878 if (hw->mac.type == e1000_82575)
2879 shift = 6;
2880 }
2881
2882 for (j = 0; j < (32 * 4); j++) {
2883 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2884 if (shift2)
2885 reta.bytes[j & 3] |= num_rx_queues << shift2;
2886 if ((j & 3) == 3)
2887 wr32(E1000_RETA(j >> 2), reta.dword);
2888 }
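	/*
	 * e.g. with num_rx_queues = 4 and shift = 0, the first pass packs
	 * entries 0, 1, 2, 3 into reta.dword = 0x03020100 (on a
	 * little-endian machine) and writes it to RETA(0); the pattern
	 * then repeats across all 128 table entries.
	 */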
2889
2890 /*
2891 * Disable raw packet checksumming so that RSS hash is placed in
2892 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2893 * offloads as they are enabled by default
2894 */
2895 rxcsum = rd32(E1000_RXCSUM);
2896 rxcsum |= E1000_RXCSUM_PCSD;
2897
2898 if (adapter->hw.mac.type >= e1000_82576)
2899 /* Enable Receive Checksum Offload for SCTP */
2900 rxcsum |= E1000_RXCSUM_CRCOFL;
2901
2902 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2903 wr32(E1000_RXCSUM, rxcsum);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002904 /*
2905 * Generate RSS hash based on TCP port numbers and/or
2906 * IPv4/v6 src and dst addresses since UDP cannot be
2907 * hashed reliably due to IP fragmentation
2908 */
2909
2910 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
2911 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2912 E1000_MRQC_RSS_FIELD_IPV6 |
2913 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2914 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002915
2916 /* If VMDq is enabled then we set the appropriate mode for that, else
2917 * we default to RSS so that an RSS hash is calculated per packet even
2918 * if we are only using one queue */
2919 if (adapter->vfs_allocated_count) {
2920 if (hw->mac.type > e1000_82575) {
2921 /* Set the default pool for the PF's first queue */
2922 u32 vtctl = rd32(E1000_VT_CTL);
2923 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2924 E1000_VT_CTL_DISABLE_DEF_POOL);
2925 vtctl |= adapter->vfs_allocated_count <<
2926 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2927 wr32(E1000_VT_CTL, vtctl);
2928 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002929 if (adapter->rss_queues > 1)
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002930 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002931 else
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002932 mrqc |= E1000_MRQC_ENABLE_VMDQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002933 } else {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002934 if (hw->mac.type != e1000_i211)
2935 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002936 }
2937 igb_vmm_control(adapter);
2938
Alexander Duyck06cf2662009-10-27 15:53:25 +00002939 wr32(E1000_MRQC, mrqc);
2940}
2941
2942/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002943 * igb_setup_rctl - configure the receive control registers
2944 * @adapter: Board private structure
2945 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002946void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002947{
2948 struct e1000_hw *hw = &adapter->hw;
2949 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002950
2951 rctl = rd32(E1000_RCTL);
2952
2953 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002954 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002955
Alexander Duyck69d728b2008-11-25 01:04:03 -08002956 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002957 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002958
Auke Kok87cb7e82008-07-08 15:08:29 -07002959 /*
2960 * enable stripping of CRC. It's unlikely this will break BMC
2961 * redirection as it did with e1000. Newer features require
2962 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002963 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002964 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002965
Alexander Duyck559e9c42009-10-27 23:52:50 +00002966 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002967 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002968
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002969 /* enable LPE to prevent packets larger than max_frame_size */
2970 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002971
Alexander Duyck952f72a2009-10-27 15:51:07 +00002972 /* disable queue 0 to prevent tail write w/o re-config */
2973 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002974
Alexander Duycke1739522009-02-19 20:39:44 -08002975 /* Attention!!! For SR-IOV PF driver operations you must enable
2976 * queue drop for all VF and PF queues to prevent head of line blocking
2977	 * if an untrusted VF does not provide descriptors to hardware.
2978 */
2979 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002980 /* set all queue drop enable bits */
2981 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002982 }
2983
Ben Greear89eaefb2012-03-06 09:41:58 +00002984 /* This is useful for sniffing bad packets. */
2985 if (adapter->netdev->features & NETIF_F_RXALL) {
2986 /* UPE and MPE will be handled by normal PROMISC logic
2987	 * in igb_set_rx_mode */
2988 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
2989 E1000_RCTL_BAM | /* RX All Bcast Pkts */
2990 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
2991
2992 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
2993 E1000_RCTL_DPF | /* Allow filtered pause */
2994 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
2995 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
2996 * and that breaks VLANs.
2997 */
2998 }
2999
Auke Kok9d5c8242008-01-24 02:22:38 -08003000 wr32(E1000_RCTL, rctl);
3001}
3002
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003003static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3004 int vfn)
3005{
3006 struct e1000_hw *hw = &adapter->hw;
3007 u32 vmolr;
3008
3009	/* if it isn't the PF, check to see if VFs are enabled and
3010	 * increase the size to support VLAN tags */
3011 if (vfn < adapter->vfs_allocated_count &&
3012 adapter->vf_data[vfn].vlans_enabled)
3013 size += VLAN_TAG_SIZE;
3014
3015 vmolr = rd32(E1000_VMOLR(vfn));
3016 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3017 vmolr |= size | E1000_VMOLR_LPE;
3018 wr32(E1000_VMOLR(vfn), vmolr);
3019
3020 return 0;
3021}
3022
Auke Kok9d5c8242008-01-24 02:22:38 -08003023/**
Alexander Duycke1739522009-02-19 20:39:44 -08003024 * igb_rlpml_set - set maximum receive packet size
3025 * @adapter: board private structure
3026 *
3027 * Configure maximum receivable packet size.
3028 **/
3029static void igb_rlpml_set(struct igb_adapter *adapter)
3030{
Alexander Duyck153285f2011-08-26 07:43:32 +00003031 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003032 struct e1000_hw *hw = &adapter->hw;
3033 u16 pf_id = adapter->vfs_allocated_count;
3034
Alexander Duycke1739522009-02-19 20:39:44 -08003035 if (pf_id) {
3036 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00003037 /*
3038 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3039 * to our max jumbo frame size, in case we need to enable
3040 * jumbo frames on one of the rings later.
3041 * This will not pass over-length frames into the default
3042 * queue because it's gated by the VMOLR.RLPML.
3043 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003044 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003045 }
3046
3047 wr32(E1000_RLPML, max_frame_size);
3048}
3049
Williams, Mitch A8151d292010-02-10 01:44:24 +00003050static inline void igb_set_vmolr(struct igb_adapter *adapter,
3051 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003052{
3053 struct e1000_hw *hw = &adapter->hw;
3054 u32 vmolr;
3055
3056 /*
3057	 * This register exists only on 82576 and newer, so on older
3058	 * hardware we should exit and do nothing
3059 */
3060 if (hw->mac.type < e1000_82576)
3061 return;
3062
3063 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003064 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3065 if (aupe)
3066 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3067 else
3068 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003069
3070 /* clear all bits that might not be set */
3071 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3072
Alexander Duycka99955f2009-11-12 18:37:19 +00003073 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003074 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3075 /*
3076 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3077 * multicast packets
3078 */
3079 if (vfn <= adapter->vfs_allocated_count)
3080 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3081
3082 wr32(E1000_VMOLR(vfn), vmolr);
3083}
3084
Alexander Duycke1739522009-02-19 20:39:44 -08003085/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003086 * igb_configure_rx_ring - Configure a receive ring after Reset
3087 * @adapter: board private structure
3088 * @ring: receive ring to be configured
3089 *
3090 * Configure the Rx unit of the MAC after a reset.
3091 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003092void igb_configure_rx_ring(struct igb_adapter *adapter,
3093 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003094{
3095 struct e1000_hw *hw = &adapter->hw;
3096 u64 rdba = ring->dma;
3097 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003098 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003099
3100 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003101 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003102
3103 /* Set DMA base address registers */
3104 wr32(E1000_RDBAL(reg_idx),
3105 rdba & 0x00000000ffffffffULL);
3106 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3107 wr32(E1000_RDLEN(reg_idx),
3108 ring->count * sizeof(union e1000_adv_rx_desc));
3109
3110 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003111 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003112 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003113 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003114
Alexander Duyck952f72a2009-10-27 15:51:07 +00003115 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003116 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003117#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003118 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003119#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003120 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003121#endif
Alexander Duyck44390ca2011-08-26 07:43:38 +00003122 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Matthew Vick3c89f6d2012-08-10 05:40:43 +00003123#ifdef CONFIG_IGB_PTP
Alexander Duyck06218a82011-08-26 07:46:55 +00003124 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00003125 srrctl |= E1000_SRRCTL_TIMESTAMP;
Matthew Vick3c89f6d2012-08-10 05:40:43 +00003126#endif /* CONFIG_IGB_PTP */
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003127 /* Only set Drop Enable if we are supporting multiple queues */
3128 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3129 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003130
3131 wr32(E1000_SRRCTL(reg_idx), srrctl);
3132
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003133 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003134 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003135
Alexander Duyck85b430b2009-10-27 15:50:29 +00003136 rxdctl |= IGB_RX_PTHRESH;
3137 rxdctl |= IGB_RX_HTHRESH << 8;
3138 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003139
3140 /* enable receive descriptor fetching */
3141 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003142 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3143}
3144
3145/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003146 * igb_configure_rx - Configure receive Unit after Reset
3147 * @adapter: board private structure
3148 *
3149 * Configure the Rx unit of the MAC after a reset.
3150 **/
3151static void igb_configure_rx(struct igb_adapter *adapter)
3152{
Hannes Eder91075842009-02-18 19:36:04 -08003153 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003154
Alexander Duyck68d480c2009-10-05 06:33:08 +00003155 /* set UTA to appropriate mode */
3156 igb_set_uta(adapter);
3157
Alexander Duyck26ad9172009-10-05 06:32:49 +00003158 /* set the correct pool for the PF default MAC address in entry 0 */
3159 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3160 adapter->vfs_allocated_count);
3161
Alexander Duyck06cf2662009-10-27 15:53:25 +00003162 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3163 * the Base and Length of the Rx Descriptor Ring */
3164 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003165 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003166}
3167
3168/**
3169 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003170 * @tx_ring: Tx descriptor ring for a specific queue
3171 *
3172 * Free all transmit software resources
3173 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003174void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003175{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003176 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003177
Alexander Duyck06034642011-08-26 07:44:22 +00003178 vfree(tx_ring->tx_buffer_info);
3179 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003180
Alexander Duyck439705e2009-10-27 23:49:20 +00003181 /* if not set, then don't free */
3182 if (!tx_ring->desc)
3183 return;
3184
Alexander Duyck59d71982010-04-27 13:09:25 +00003185 dma_free_coherent(tx_ring->dev, tx_ring->size,
3186 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003187
3188 tx_ring->desc = NULL;
3189}
3190
3191/**
3192 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3193 * @adapter: board private structure
3194 *
3195 * Free all transmit software resources
3196 **/
3197static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3198{
3199 int i;
3200
3201 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003202 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003203}
3204
Alexander Duyckebe42d12011-08-26 07:45:09 +00003205void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3206 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003207{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003208 if (tx_buffer->skb) {
3209 dev_kfree_skb_any(tx_buffer->skb);
3210 if (tx_buffer->dma)
3211 dma_unmap_single(ring->dev,
3212 tx_buffer->dma,
3213 tx_buffer->length,
3214 DMA_TO_DEVICE);
3215 } else if (tx_buffer->dma) {
3216 dma_unmap_page(ring->dev,
3217 tx_buffer->dma,
3218 tx_buffer->length,
3219 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003220 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003221 tx_buffer->next_to_watch = NULL;
3222 tx_buffer->skb = NULL;
3223 tx_buffer->dma = 0;
3224 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003225}
3226
3227/**
3228 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003229 * @tx_ring: ring to be cleaned
3230 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003231static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003232{
Alexander Duyck06034642011-08-26 07:44:22 +00003233 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003234 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003235 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003236
Alexander Duyck06034642011-08-26 07:44:22 +00003237 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003238 return;
3239 /* Free all the Tx ring sk_buffs */
3240
3241 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003242 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003243 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003244 }
3245
John Fastabenddad8a3b2012-04-23 12:22:39 +00003246 netdev_tx_reset_queue(txring_txq(tx_ring));
3247
Alexander Duyck06034642011-08-26 07:44:22 +00003248 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3249 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003250
3251 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003252 memset(tx_ring->desc, 0, tx_ring->size);
3253
3254 tx_ring->next_to_use = 0;
3255 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003256}
3257
3258/**
3259 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3260 * @adapter: board private structure
3261 **/
3262static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3263{
3264 int i;
3265
3266 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003267 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003268}
3269
3270/**
3271 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003272 * @rx_ring: ring to clean the resources from
3273 *
3274 * Free all receive software resources
3275 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003276void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003277{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003278 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003279
Alexander Duyck06034642011-08-26 07:44:22 +00003280 vfree(rx_ring->rx_buffer_info);
3281 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003282
Alexander Duyck439705e2009-10-27 23:49:20 +00003283 /* if not set, then don't free */
3284 if (!rx_ring->desc)
3285 return;
3286
Alexander Duyck59d71982010-04-27 13:09:25 +00003287 dma_free_coherent(rx_ring->dev, rx_ring->size,
3288 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003289
3290 rx_ring->desc = NULL;
3291}
3292
3293/**
3294 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3295 * @adapter: board private structure
3296 *
3297 * Free all receive software resources
3298 **/
3299static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3300{
3301 int i;
3302
3303 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003304 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003305}
3306
3307/**
3308 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003309 * @rx_ring: ring to free buffers from
3310 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003311static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003312{
Auke Kok9d5c8242008-01-24 02:22:38 -08003313 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003314 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003315
Alexander Duyck06034642011-08-26 07:44:22 +00003316 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003317 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003318
Auke Kok9d5c8242008-01-24 02:22:38 -08003319 /* Free all the Rx ring sk_buffs */
3320 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003321 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003322 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003323 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003324 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003325 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003326 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003327 buffer_info->dma = 0;
3328 }
3329
3330 if (buffer_info->skb) {
3331 dev_kfree_skb(buffer_info->skb);
3332 buffer_info->skb = NULL;
3333 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003334 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003335 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003336 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003337 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003338 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003339 buffer_info->page_dma = 0;
3340 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003341 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003342 put_page(buffer_info->page);
3343 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003344 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003345 }
3346 }
3347
Alexander Duyck06034642011-08-26 07:44:22 +00003348 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3349 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003350
3351 /* Zero out the descriptor ring */
3352 memset(rx_ring->desc, 0, rx_ring->size);
3353
3354 rx_ring->next_to_clean = 0;
3355 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003356}
3357
3358/**
3359 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3360 * @adapter: board private structure
3361 **/
3362static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3363{
3364 int i;
3365
3366 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003367 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003368}
3369
3370/**
3371 * igb_set_mac - Change the Ethernet Address of the NIC
3372 * @netdev: network interface device structure
3373 * @p: pointer to an address structure
3374 *
3375 * Returns 0 on success, negative on failure
3376 **/
3377static int igb_set_mac(struct net_device *netdev, void *p)
3378{
3379 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003380 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003381 struct sockaddr *addr = p;
3382
3383 if (!is_valid_ether_addr(addr->sa_data))
3384 return -EADDRNOTAVAIL;
3385
3386 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003387 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003388
Alexander Duyck26ad9172009-10-05 06:32:49 +00003389 /* set the correct pool for the new PF MAC address in entry 0 */
3390 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3391 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003392
Auke Kok9d5c8242008-01-24 02:22:38 -08003393 return 0;
3394}
3395
3396/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003397 * igb_write_mc_addr_list - write multicast addresses to MTA
3398 * @netdev: network interface device structure
3399 *
3400 * Writes multicast address list to the MTA hash table.
3401 * Returns: -ENOMEM on failure
3402 * 0 on no addresses written
3403 * X on writing X addresses to MTA
3404 **/
3405static int igb_write_mc_addr_list(struct net_device *netdev)
3406{
3407 struct igb_adapter *adapter = netdev_priv(netdev);
3408 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003409 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003410 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003411 int i;
3412
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003413 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003414 /* nothing to program, so clear mc list */
3415 igb_update_mc_addr_list(hw, NULL, 0);
3416 igb_restore_vf_multicasts(adapter);
3417 return 0;
3418 }
3419
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003420 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003421 if (!mta_list)
3422 return -ENOMEM;
3423
Alexander Duyck68d480c2009-10-05 06:33:08 +00003424 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003425 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003426 netdev_for_each_mc_addr(ha, netdev)
3427 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003428
Alexander Duyck68d480c2009-10-05 06:33:08 +00003429 igb_update_mc_addr_list(hw, mta_list, i);
3430 kfree(mta_list);
3431
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003432 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003433}
3434
3435/**
3436 * igb_write_uc_addr_list - write unicast addresses to RAR table
3437 * @netdev: network interface device structure
3438 *
3439 * Writes unicast address list to the RAR table.
3440 * Returns: -ENOMEM on failure/insufficient address space
3441 * 0 on no addresses written
3442 * X on writing X addresses to the RAR table
3443 **/
3444static int igb_write_uc_addr_list(struct net_device *netdev)
3445{
3446 struct igb_adapter *adapter = netdev_priv(netdev);
3447 struct e1000_hw *hw = &adapter->hw;
3448 unsigned int vfn = adapter->vfs_allocated_count;
3449 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
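	/* e.g. the 82576 exposes 24 receive address registers, so with
	 * 7 VFs this leaves 24 - (7 + 1) = 16 entries for unicast
	 * filtering */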
3450 int count = 0;
3451
3452	/* return -ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003453 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003454 return -ENOMEM;
3455
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003456 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003457 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003458
3459 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003460 if (!rar_entries)
3461 break;
3462 igb_rar_set_qsel(adapter, ha->addr,
3463 rar_entries--,
3464 vfn);
3465 count++;
3466 }
3467 }
3468 /* write the addresses in reverse order to avoid write combining */
3469 for (; rar_entries > 0 ; rar_entries--) {
3470 wr32(E1000_RAH(rar_entries), 0);
3471 wr32(E1000_RAL(rar_entries), 0);
3472 }
3473 wrfl();
3474
3475 return count;
3476}
3477
3478/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003479 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003480 * @netdev: network interface device structure
3481 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003482 * The set_rx_mode entry point is called whenever the unicast or multicast
3483 * address lists or the network interface flags are updated. This routine is
3484 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003485 * promiscuous mode, and all-multi behavior.
3486 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003487static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003488{
3489 struct igb_adapter *adapter = netdev_priv(netdev);
3490 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003491 unsigned int vfn = adapter->vfs_allocated_count;
3492 u32 rctl, vmolr = 0;
3493 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003494
3495 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003496 rctl = rd32(E1000_RCTL);
3497
Alexander Duyck68d480c2009-10-05 06:33:08 +00003498	/* clear the affected bits */
3499 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3500
Patrick McHardy746b9f02008-07-16 20:15:45 -07003501 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003502 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003503 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003504 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003505 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003506 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003507 vmolr |= E1000_VMOLR_MPME;
3508 } else {
3509 /*
3510	 * Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003511 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003512 * that we can at least receive multicast traffic
3513 */
3514 count = igb_write_mc_addr_list(netdev);
3515 if (count < 0) {
3516 rctl |= E1000_RCTL_MPE;
3517 vmolr |= E1000_VMOLR_MPME;
3518 } else if (count) {
3519 vmolr |= E1000_VMOLR_ROMPE;
3520 }
3521 }
3522 /*
3523	 * Write addresses to available RAR registers; if there is not
3524 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003525 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003526 */
3527 count = igb_write_uc_addr_list(netdev);
3528 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003529 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003530 vmolr |= E1000_VMOLR_ROPE;
3531 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003532 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003533 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003534 wr32(E1000_RCTL, rctl);
3535
Alexander Duyck68d480c2009-10-05 06:33:08 +00003536 /*
3537 * In order to support SR-IOV and eventually VMDq it is necessary to set
3538 * the VMOLR to enable the appropriate modes. Without this workaround
3539 * we will have issues with VLAN tag stripping not being done for frames
3540 * that are only arriving because we are the default pool
3541 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003542 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003543 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003544
Alexander Duyck68d480c2009-10-05 06:33:08 +00003545 vmolr |= rd32(E1000_VMOLR(vfn)) &
3546 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3547 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003548 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003549}
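/*
 * Summary of the fallback ladder above: IFF_PROMISC sets UPE and MPE
 * outright; otherwise IFF_ALLMULTI (or a failed MTA write) sets MPE,
 * and running out of RAR entries for the unicast list sets UPE, so the
 * hardware never silently drops traffic the stack asked to receive.
 */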
3550
Greg Rose13800462010-11-06 02:08:26 +00003551static void igb_check_wvbr(struct igb_adapter *adapter)
3552{
3553 struct e1000_hw *hw = &adapter->hw;
3554 u32 wvbr = 0;
3555
3556 switch (hw->mac.type) {
3557 case e1000_82576:
3558 case e1000_i350:
3559	wvbr = rd32(E1000_WVBR);
	if (!wvbr)
		return;
3561 break;
3562 default:
3563 break;
3564 }
3565
3566 adapter->wvbr |= wvbr;
3567}
3568
3569#define IGB_STAGGERED_QUEUE_OFFSET 8
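/*
 * WVBR reports spoof events per Tx queue. With up to eight VFs and two
 * queues each, the second queue's status bits start eight positions
 * up, which is why the check below tests both bit j and bit
 * (j + IGB_STAGGERED_QUEUE_OFFSET) for VF j.
 */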
3570
3571static void igb_spoof_check(struct igb_adapter *adapter)
3572{
3573 int j;
3574
3575 if (!adapter->wvbr)
3576 return;
3577
3578	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3579 if (adapter->wvbr & (1 << j) ||
3580 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3581 dev_warn(&adapter->pdev->dev,
3582 "Spoof event(s) detected on VF %d\n", j);
3583 adapter->wvbr &=
3584 ~((1 << j) |
3585 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3586 }
3587 }
3588}
3589
Auke Kok9d5c8242008-01-24 02:22:38 -08003590/* Need to wait a few seconds after link up to get diagnostic information from
3591 * the phy */
3592static void igb_update_phy_info(unsigned long data)
3593{
3594 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003595 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003596}
3597
3598/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003599 * igb_has_link - check shared code for link and determine up/down
3600 * @adapter: pointer to driver private info
3601 **/
Nick Nunley31455352010-02-17 01:01:21 +00003602bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003603{
3604 struct e1000_hw *hw = &adapter->hw;
3605 bool link_active = false;
3606 s32 ret_val = 0;
3607
3608 /* get_link_status is set on LSC (link status) interrupt or
3609 * rx sequence error interrupt. get_link_status will stay
3610 * false until the e1000_check_for_link establishes link
3611 * for copper adapters ONLY
3612 */
3613 switch (hw->phy.media_type) {
3614 case e1000_media_type_copper:
3615 if (hw->mac.get_link_status) {
3616 ret_val = hw->mac.ops.check_for_link(hw);
3617 link_active = !hw->mac.get_link_status;
3618 } else {
3619 link_active = true;
3620 }
3621 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003622 case e1000_media_type_internal_serdes:
3623 ret_val = hw->mac.ops.check_for_link(hw);
3624 link_active = hw->mac.serdes_has_link;
3625 break;
3626 default:
3627 case e1000_media_type_unknown:
3628 break;
3629 }
3630
3631 return link_active;
3632}
3633
Stefan Assmann563988d2011-04-05 04:27:15 +00003634static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3635{
3636 bool ret = false;
3637 u32 ctrl_ext, thstat;
3638
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003639 /* check for thermal sensor event on i350 copper only */
Stefan Assmann563988d2011-04-05 04:27:15 +00003640 if (hw->mac.type == e1000_i350) {
3641 thstat = rd32(E1000_THSTAT);
3642 ctrl_ext = rd32(E1000_CTRL_EXT);
3643
3644 if ((hw->phy.media_type == e1000_media_type_copper) &&
3645 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3646 ret = !!(thstat & event);
3647 }
3648 }
3649
3650 return ret;
3651}
3652
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003653/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003654 * igb_watchdog - Timer Call-back
3655 * @data: pointer to adapter cast into an unsigned long
3656 **/
3657static void igb_watchdog(unsigned long data)
3658{
3659 struct igb_adapter *adapter = (struct igb_adapter *)data;
3660 /* Do the rest outside of interrupt context */
3661 schedule_work(&adapter->watchdog_task);
3662}
3663
3664static void igb_watchdog_task(struct work_struct *work)
3665{
3666 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003667 struct igb_adapter,
3668 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003669 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003670 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003671 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003672 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003673
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003674 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003675 if (link) {
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003676 /* Cancel scheduled suspend requests. */
3677 pm_runtime_resume(netdev->dev.parent);
3678
Auke Kok9d5c8242008-01-24 02:22:38 -08003679 if (!netif_carrier_ok(netdev)) {
3680 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003681 hw->mac.ops.get_speed_and_duplex(hw,
3682 &adapter->link_speed,
3683 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003684
3685 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003686 /* Links status message must follow this format */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003687 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3688 "Duplex, Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003689 netdev->name,
3690 adapter->link_speed,
3691 adapter->link_duplex == FULL_DUPLEX ?
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003692 "Full" : "Half",
3693 (ctrl & E1000_CTRL_TFCE) &&
3694 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3695 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3696 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
Auke Kok9d5c8242008-01-24 02:22:38 -08003697
Stefan Assmann563988d2011-04-05 04:27:15 +00003698 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003699 if (igb_thermal_sensor_event(hw,
3700 E1000_THSTAT_LINK_THROTTLE)) {
3701 netdev_info(netdev, "The network adapter link "
3702 "speed was downshifted because it "
3703 "overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003704 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003705
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003706 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003707 adapter->tx_timeout_factor = 1;
3708 switch (adapter->link_speed) {
3709 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003710 adapter->tx_timeout_factor = 14;
3711 break;
3712 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003713 /* maybe add some timeout factor ? */
3714 break;
3715 }
3716
3717 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003718
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003719 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003720 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003721
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003722 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003723 if (!test_bit(__IGB_DOWN, &adapter->state))
3724 mod_timer(&adapter->phy_info_timer,
3725 round_jiffies(jiffies + 2 * HZ));
3726 }
3727 } else {
3728 if (netif_carrier_ok(netdev)) {
3729 adapter->link_speed = 0;
3730 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003731
3732 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003733 if (igb_thermal_sensor_event(hw,
3734 E1000_THSTAT_PWR_DOWN)) {
3735 netdev_err(netdev, "The network adapter was "
3736 "stopped because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003737 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003738
Alexander Duyck527d47c2008-11-27 00:21:39 -08003739 /* Links status message must follow this format */
3740 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3741 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003742 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003743
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003744 igb_ping_all_vfs(adapter);
3745
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003746 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003747 if (!test_bit(__IGB_DOWN, &adapter->state))
3748 mod_timer(&adapter->phy_info_timer,
3749 round_jiffies(jiffies + 2 * HZ));
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003750
3751 pm_schedule_suspend(netdev->dev.parent,
3752 MSEC_PER_SEC * 5);
Auke Kok9d5c8242008-01-24 02:22:38 -08003753 }
3754 }
3755
Eric Dumazet12dcd862010-10-15 17:27:10 +00003756 spin_lock(&adapter->stats64_lock);
3757 igb_update_stats(adapter, &adapter->stats64);
3758 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003759
Alexander Duyckdbabb062009-11-12 18:38:16 +00003760 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003761 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003762 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003763 /* We've lost link, so the controller stops DMA,
3764 * but we've got queued Tx work that's never going
3765 * to get done, so reset controller to flush Tx.
3766 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003767 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3768 adapter->tx_timeout_count++;
3769 schedule_work(&adapter->reset_task);
3770 /* return immediately since reset is imminent */
3771 return;
3772 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003773 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003774
Alexander Duyckdbabb062009-11-12 18:38:16 +00003775 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00003776 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00003777 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003778
Auke Kok9d5c8242008-01-24 02:22:38 -08003779 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003780 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003781 u32 eics = 0;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00003782 for (i = 0; i < adapter->num_q_vectors; i++)
3783 eics |= adapter->q_vector[i]->eims_value;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003784 wr32(E1000_EICS, eics);
3785 } else {
3786 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3787 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003788
Greg Rose13800462010-11-06 02:08:26 +00003789 igb_spoof_check(adapter);
3790
Auke Kok9d5c8242008-01-24 02:22:38 -08003791 /* Reset the timer */
3792 if (!test_bit(__IGB_DOWN, &adapter->state))
3793 mod_timer(&adapter->watchdog_timer,
3794 round_jiffies(jiffies + 2 * HZ));
3795}
3796
3797enum latency_range {
3798 lowest_latency = 0,
3799 low_latency = 1,
3800 bulk_latency = 2,
3801 latency_invalid = 255
3802};
3803
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003804/**
3805 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3806 *
3807 * Stores a new ITR value based strictly on packet size. This
3808 * algorithm is less sophisticated than that used in igb_update_itr,
3809 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003810 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003811 * were determined based on theoretical maximum wire speed and testing
3812 * data, in order to minimize response time while increasing bulk
3813 * throughput.
3814 * This functionality is controlled by the InterruptThrottleRate module
3815 * parameter (see igb_param.c)
3816 * NOTE: This function is called only when operating in a multiqueue
3817 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003818 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003819 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003820static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003821{
Alexander Duyck047e0032009-10-27 15:49:27 +00003822 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003823 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003824 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003825 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003826
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003827 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3828 * ints/sec - ITR timer value of IGB_4K_ITR.
3829 */
3830 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003831 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003832 goto set_itr_val;
3833 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003834
Alexander Duyck0ba82992011-08-26 07:45:47 +00003835 packets = q_vector->rx.total_packets;
3836 if (packets)
3837 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003838
Alexander Duyck0ba82992011-08-26 07:45:47 +00003839 packets = q_vector->tx.total_packets;
3840 if (packets)
3841 avg_wire_size = max_t(u32, avg_wire_size,
3842 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003843
3844 /* if avg_wire_size isn't set no work was done */
3845 if (!avg_wire_size)
3846 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003847
3848 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3849 avg_wire_size += 24;
3850
3851 /* Don't starve jumbo frames */
3852 avg_wire_size = min(avg_wire_size, 3000);
3853
3854 /* Give a little boost to mid-size frames */
3855 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3856 new_val = avg_wire_size / 3;
3857 else
3858 new_val = avg_wire_size / 2;
3859
Alexander Duyck0ba82992011-08-26 07:45:47 +00003860 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3861 if (new_val < IGB_20K_ITR &&
3862 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3863 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3864 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003865
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003866set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003867 if (new_val != q_vector->itr_val) {
3868 q_vector->itr_val = new_val;
3869 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003870 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003871clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003872 q_vector->rx.total_bytes = 0;
3873 q_vector->rx.total_packets = 0;
3874 q_vector->tx.total_bytes = 0;
3875 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003876}
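
/*
 * Illustrative sketch, not part of the driver: the heuristic above pulled
 * out as a hypothetical helper, assuming the ~0.25 usec EITR granularity
 * of 82576-class hardware.
 */
static inline int igb_example_ring_itr(int avg_wire_size)
{
	avg_wire_size += 24;				/* CRC + preamble + IPG */
	avg_wire_size = min(avg_wire_size, 3000);	/* don't starve jumbos */

	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		return avg_wire_size / 3;		/* mid-size boost */

	/* e.g. 1500-byte frames: (1500 + 24) / 2 = 762, i.e. ~190 usec per
	 * interrupt or roughly 5200 ints/sec at sustained wire rate */
	return avg_wire_size / 2;
}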
3877
3878/**
3879 * igb_update_itr - update the dynamic ITR value based on statistics
3880 * Stores a new ITR value based on packets and byte
3881 * counts during the last interrupt. The advantage of per interrupt
3882 * computation is faster updates and more accurate ITR for the current
3883 * traffic pattern. Constants in this function were computed
3884 * based on theoretical maximum wire speed and thresholds were set based
3885 * on testing data as well as attempting to minimize response time
3886 * while increasing bulk throughput.
3887 * This functionality is controlled by the InterruptThrottleRate module
3888 * parameter (see igb_param.c)
3889 * NOTE: These calculations are only valid when operating in a single-
3890 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003891 * @q_vector: pointer to q_vector
3892 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003893 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003894static void igb_update_itr(struct igb_q_vector *q_vector,
3895 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003896{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003897 unsigned int packets = ring_container->total_packets;
3898 unsigned int bytes = ring_container->total_bytes;
3899 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003900
Alexander Duyck0ba82992011-08-26 07:45:47 +00003901 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003902 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003903 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003904
Alexander Duyck0ba82992011-08-26 07:45:47 +00003905 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003906 case lowest_latency:
3907 /* handle TSO and jumbo frames */
3908 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003909 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003910 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003911 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003912 break;
3913 case low_latency: /* 50 usec aka 20000 ints/s */
3914 if (bytes > 10000) {
3915 /* this if handles the TSO accounting */
3916 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003917 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003918 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003919 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003920 } else if ((packets > 35)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003921 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003922 }
3923 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003924 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003925 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003926 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003927 }
3928 break;
3929 case bulk_latency: /* 250 usec aka 4000 ints/s */
3930 if (bytes > 25000) {
3931 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003932 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003933 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003934 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003935 }
3936 break;
3937 }
3938
Alexander Duyck0ba82992011-08-26 07:45:47 +00003939 /* clear work counters since we have the values we need */
3940 ring_container->total_bytes = 0;
3941 ring_container->total_packets = 0;
3942
3943 /* write updated itr to ring container */
3944 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003945}
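
/*
 * Worked trace, illustrative only (the helper below is hypothetical):
 * a ring container sitting in low_latency that saw 20 packets / 30000
 * bytes since the last interrupt averages 1500 bytes/packet; with
 * bytes > 10000 and bytes/packets > 1200 it steps to bulk_latency,
 * which igb_set_itr() below maps to IGB_4K_ITR (4000 ints/sec).
 */
static void igb_example_itr_step(struct igb_q_vector *q_vector)
{
	q_vector->rx.total_packets = 20;
	q_vector->rx.total_bytes = 30000;	/* 1500 bytes/packet */
	q_vector->rx.itr = low_latency;

	igb_update_itr(q_vector, &q_vector->rx);
	/* q_vector->rx.itr is now bulk_latency and the counters are zeroed */
}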
3946
Alexander Duyck0ba82992011-08-26 07:45:47 +00003947static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003948{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003949 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00003950 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003951 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003952
3953 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3954 if (adapter->link_speed != SPEED_1000) {
3955 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003956 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08003957 goto set_itr_now;
3958 }
3959
Alexander Duyck0ba82992011-08-26 07:45:47 +00003960 igb_update_itr(q_vector, &q_vector->tx);
3961 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08003962
Alexander Duyck0ba82992011-08-26 07:45:47 +00003963 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003964
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003965 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00003966 if (current_itr == lowest_latency &&
3967 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3968 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003969 current_itr = low_latency;
3970
Auke Kok9d5c8242008-01-24 02:22:38 -08003971 switch (current_itr) {
3972 /* counts and packets in update_itr are dependent on these numbers */
3973 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003974 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003975 break;
3976 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003977 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003978 break;
3979 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003980 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003981 break;
3982 default:
3983 break;
3984 }
3985
3986set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00003987 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003988 /* this attempts to bias the interrupt rate towards Bulk
3989 * by adding intermediate steps when interrupt rate is
3990 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003991 new_itr = new_itr > q_vector->itr_val ?
3992 max((new_itr * q_vector->itr_val) /
3993 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00003994 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003995 new_itr;
3996 /* Don't write the value here; it resets the adapter's
3997 * internal timer, and causes us to delay far longer than
3998 * we should between interrupts. Instead, we write the ITR
3999 * value at the beginning of the next interrupt so the timing
4000 * ends up being correct.
4001 */
Alexander Duyck047e0032009-10-27 15:49:27 +00004002 q_vector->itr_val = new_itr;
4003 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004004 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004005}
4006
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00004007static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4008 u32 type_tucmd, u32 mss_l4len_idx)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004009{
4010 struct e1000_adv_tx_context_desc *context_desc;
4011 u16 i = tx_ring->next_to_use;
4012
4013 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4014
4015 i++;
4016 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4017
4018 /* set bits to identify this as an advanced context descriptor */
4019 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4020
4021 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004022 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004023 mss_l4len_idx |= tx_ring->reg_idx << 4;
4024
4025 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4026 context_desc->seqnum_seed = 0;
4027 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4028 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4029}
4030
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004031static int igb_tso(struct igb_ring *tx_ring,
4032 struct igb_tx_buffer *first,
4033 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004034{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004035 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004036 u32 vlan_macip_lens, type_tucmd;
4037 u32 mss_l4len_idx, l4len;
4038
4039 if (!skb_is_gso(skb))
4040 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004041
4042 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004043 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004044 if (err)
4045 return err;
4046 }
4047
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004048 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4049 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004050
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004051 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004052 struct iphdr *iph = ip_hdr(skb);
4053 iph->tot_len = 0;
4054 iph->check = 0;
4055 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4056 iph->daddr, 0,
4057 IPPROTO_TCP,
4058 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004059 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004060 first->tx_flags |= IGB_TX_FLAGS_TSO |
4061 IGB_TX_FLAGS_CSUM |
4062 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004063 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004064 ipv6_hdr(skb)->payload_len = 0;
4065 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4066 &ipv6_hdr(skb)->daddr,
4067 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004068 first->tx_flags |= IGB_TX_FLAGS_TSO |
4069 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004070 }
4071
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004072 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004073 l4len = tcp_hdrlen(skb);
4074 *hdr_len = skb_transport_offset(skb) + l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08004075
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004076 /* update gso size and bytecount with header size */
4077 first->gso_segs = skb_shinfo(skb)->gso_segs;
4078 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4079
Auke Kok9d5c8242008-01-24 02:22:38 -08004080 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004081 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4082 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004083
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004084 /* VLAN MACLEN IPLEN */
4085 vlan_macip_lens = skb_network_header_len(skb);
4086 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004087 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004088
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004089 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004090
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004091 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004092}
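
/*
 * Worked example, illustrative only: a TCP/IPv4 TSO frame with MSS 1448
 * and standard 20-byte IP and TCP headers on untagged Ethernet gives
 *
 *   l4len         = 20
 *   *hdr_len      = 14 (MAC) + 20 (IP) + 20 (TCP) = 54
 *   mss_l4len_idx = (20 << E1000_ADVTXD_L4LEN_SHIFT) |
 *                   (1448 << E1000_ADVTXD_MSS_SHIFT)
 *
 * so a 64-segment send adds (64 - 1) * 54 replicated header bytes to
 * first->bytecount.
 */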
4093
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004094static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004095{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004096 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004097 u32 vlan_macip_lens = 0;
4098 u32 mss_l4len_idx = 0;
4099 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004100
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004101 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004102 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4103 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004104 } else {
4105 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004106 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004107 case __constant_htons(ETH_P_IP):
4108 vlan_macip_lens |= skb_network_header_len(skb);
4109 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4110 l4_hdr = ip_hdr(skb)->protocol;
4111 break;
4112 case __constant_htons(ETH_P_IPV6):
4113 vlan_macip_lens |= skb_network_header_len(skb);
4114 l4_hdr = ipv6_hdr(skb)->nexthdr;
4115 break;
4116 default:
4117 if (unlikely(net_ratelimit())) {
4118 dev_warn(tx_ring->dev,
4119 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004120 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004121 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004122 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004123 }
4124
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004125 switch (l4_hdr) {
4126 case IPPROTO_TCP:
4127 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4128 mss_l4len_idx = tcp_hdrlen(skb) <<
4129 E1000_ADVTXD_L4LEN_SHIFT;
4130 break;
4131 case IPPROTO_SCTP:
4132 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4133 mss_l4len_idx = sizeof(struct sctphdr) <<
4134 E1000_ADVTXD_L4LEN_SHIFT;
4135 break;
4136 case IPPROTO_UDP:
4137 mss_l4len_idx = sizeof(struct udphdr) <<
4138 E1000_ADVTXD_L4LEN_SHIFT;
4139 break;
4140 default:
4141 if (unlikely(net_ratelimit())) {
4142 dev_warn(tx_ring->dev,
4143 "partial checksum but l4 proto=%x!\n",
4144 l4_hdr);
4145 }
4146 break;
4147 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004148
4149 /* update TX checksum flag */
4150 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004151 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004152
4153 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004154 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004155
4156 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004157}
4158
Alexander Duycke032afc2011-08-26 07:44:48 +00004159static __le32 igb_tx_cmd_type(u32 tx_flags)
4160{
4161 /* set type for advanced descriptor with frame checksum insertion */
4162 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4163 E1000_ADVTXD_DCMD_IFCS |
4164 E1000_ADVTXD_DCMD_DEXT);
4165
4166 /* set HW vlan bit if vlan is present */
4167 if (tx_flags & IGB_TX_FLAGS_VLAN)
4168 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4169
Matthew Vick3c89f6d2012-08-10 05:40:43 +00004170#ifdef CONFIG_IGB_PTP
Alexander Duycke032afc2011-08-26 07:44:48 +00004171 /* set timestamp bit if present */
Matthew Vick1f6e8172012-08-18 07:26:33 +00004172 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
Alexander Duycke032afc2011-08-26 07:44:48 +00004173 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
Matthew Vick3c89f6d2012-08-10 05:40:43 +00004174#endif /* CONFIG_IGB_PTP */
Alexander Duycke032afc2011-08-26 07:44:48 +00004175
4176 /* set segmentation bits for TSO */
4177 if (tx_flags & IGB_TX_FLAGS_TSO)
4178 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4179
4180 return cmd_type;
4181}
4182
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004183static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4184 union e1000_adv_tx_desc *tx_desc,
4185 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004186{
4187 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4188
4189 /* 82575 requires a unique index per ring if any offload is enabled */
4190 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
Alexander Duyck866cff02011-08-26 07:45:36 +00004191 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004192 olinfo_status |= tx_ring->reg_idx << 4;
4193
4194 /* insert L4 checksum */
4195 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4196 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4197
4198 /* insert IPv4 checksum */
4199 if (tx_flags & IGB_TX_FLAGS_IPV4)
4200 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4201 }
4202
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004203 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004204}
4205
Alexander Duyckebe42d12011-08-26 07:45:09 +00004206/*
4207 * The largest size we can write to the descriptor is 65535. In order to
4208 * maintain a power of two alignment we have to limit ourselves to 32K.
4209 */
4210#define IGB_MAX_TXD_PWR 15
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004211#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
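
/*
 * Minimal sketch (hypothetical helper): the descriptor count that the
 * 32K split implies for a single buffer; igb_tx_map() below does this
 * implicitly in its inner loop.
 */
static inline u16 igb_example_txd_count(unsigned int size)
{
	/* one data descriptor per 32K chunk */
	return (size + IGB_MAX_DATA_PER_TXD - 1) >> IGB_MAX_TXD_PWR;
}
/* e.g. 32768 bytes fit in 1 descriptor, a 65537-byte buffer needs 3 */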
Auke Kok9d5c8242008-01-24 02:22:38 -08004212
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004213static void igb_tx_map(struct igb_ring *tx_ring,
4214 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004215 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004216{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004217 struct sk_buff *skb = first->skb;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004218 struct igb_tx_buffer *tx_buffer_info;
4219 union e1000_adv_tx_desc *tx_desc;
4220 dma_addr_t dma;
4221 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4222 unsigned int data_len = skb->data_len;
4223 unsigned int size = skb_headlen(skb);
4224 unsigned int paylen = skb->len - hdr_len;
4225 __le32 cmd_type;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004226 u32 tx_flags = first->tx_flags;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004227 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004228
4229 tx_desc = IGB_TX_DESC(tx_ring, i);
4230
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004231 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004232 cmd_type = igb_tx_cmd_type(tx_flags);
4233
4234 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4235 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004236 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004237
Alexander Duyckebe42d12011-08-26 07:45:09 +00004238 /* record length, and DMA address */
4239 first->length = size;
4240 first->dma = dma;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004241 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004242
Alexander Duyckebe42d12011-08-26 07:45:09 +00004243 for (;;) {
4244 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4245 tx_desc->read.cmd_type_len =
4246 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004247
Alexander Duyckebe42d12011-08-26 07:45:09 +00004248 i++;
4249 tx_desc++;
4250 if (i == tx_ring->count) {
4251 tx_desc = IGB_TX_DESC(tx_ring, 0);
4252 i = 0;
4253 }
4254
4255 dma += IGB_MAX_DATA_PER_TXD;
4256 size -= IGB_MAX_DATA_PER_TXD;
4257
4258 tx_desc->read.olinfo_status = 0;
4259 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4260 }
4261
4262 if (likely(!data_len))
4263 break;
4264
4265 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4266
Alexander Duyck65689fe2009-03-20 00:17:43 +00004267 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004268 tx_desc++;
4269 if (i == tx_ring->count) {
4270 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004271 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004272 }
Alexander Duyck65689fe2009-03-20 00:17:43 +00004273
Eric Dumazet9e903e02011-10-18 21:00:24 +00004274 size = skb_frag_size(frag);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004275 data_len -= size;
4276
4277 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4278 size, DMA_TO_DEVICE);
4279 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004280 goto dma_error;
4281
Alexander Duyckebe42d12011-08-26 07:45:09 +00004282 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4283 tx_buffer_info->length = size;
4284 tx_buffer_info->dma = dma;
4285
4286 tx_desc->read.olinfo_status = 0;
4287 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4288
4289 frag++;
Auke Kok9d5c8242008-01-24 02:22:38 -08004290 }
4291
Eric Dumazetbdbc0632012-01-04 20:23:36 +00004292 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4293
Alexander Duyckebe42d12011-08-26 07:45:09 +00004294 /* write last descriptor with RS and EOP bits */
4295 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
Ben Greear6b8f0922012-03-06 09:41:53 +00004296 if (unlikely(skb->no_fcs))
4297 cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
Alexander Duyckebe42d12011-08-26 07:45:09 +00004298 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyck8542db02011-08-26 07:44:43 +00004299
4300 /* set the timestamp */
4301 first->time_stamp = jiffies;
4302
Alexander Duyckebe42d12011-08-26 07:45:09 +00004303 /*
4304 * Force memory writes to complete before letting h/w know there
4305 * are new descriptors to fetch. (Only applicable for weak-ordered
4306 * memory model archs, such as IA-64).
4307 *
4308 * We also need this memory barrier to make certain all of the
4309 * status bits have been updated before next_to_watch is written.
4310 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004311 wmb();
4312
Alexander Duyckebe42d12011-08-26 07:45:09 +00004313 /* set next_to_watch value indicating a packet is present */
4314 first->next_to_watch = tx_desc;
4315
4316 i++;
4317 if (i == tx_ring->count)
4318 i = 0;
4319
Auke Kok9d5c8242008-01-24 02:22:38 -08004320 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004321
Alexander Duyckfce99e32009-10-27 15:51:27 +00004322 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004323
Auke Kok9d5c8242008-01-24 02:22:38 -08004324 /* we need this if more than one processor can write to our tail
4325 * at a time, it synchronizes IO on IA64/Altix systems */
4326 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004327
4328 return;
4329
4330dma_error:
4331 dev_err(tx_ring->dev, "TX DMA map failed\n");
4332
4333 /* clear dma mappings for failed tx_buffer_info map */
4334 for (;;) {
4335 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4336 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4337 if (tx_buffer_info == first)
4338 break;
4339 if (i == 0)
4340 i = tx_ring->count;
4341 i--;
4342 }
4343
4344 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004345}
4346
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004347static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004348{
Alexander Duycke694e962009-10-27 15:53:06 +00004349 struct net_device *netdev = tx_ring->netdev;
4350
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004351 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004352
Auke Kok9d5c8242008-01-24 02:22:38 -08004353 /* Herbert's original patch had:
4354 * smp_mb__after_netif_stop_queue();
4355 * but since that doesn't exist yet, just open code it. */
4356 smp_mb();
4357
4358 /* We need to check again in a case another CPU has just
4359 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004360 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004361 return -EBUSY;
4362
4363 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004364 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004365
4366 u64_stats_update_begin(&tx_ring->tx_syncp2);
4367 tx_ring->tx_stats.restart_queue2++;
4368 u64_stats_update_end(&tx_ring->tx_syncp2);
4369
Auke Kok9d5c8242008-01-24 02:22:38 -08004370 return 0;
4371}
4372
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004373static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004374{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004375 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004376 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004377 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004378}
4379
Alexander Duyckcd392f52011-08-26 07:43:59 +00004380netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4381 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004382{
Matthew Vick1f6e8172012-08-18 07:26:33 +00004383#ifdef CONFIG_IGB_PTP
4384 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4385#endif /* CONFIG_IGB_PTP */
Alexander Duyck8542db02011-08-26 07:44:43 +00004386 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004387 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004388 u32 tx_flags = 0;
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004389 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004390 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004391
Auke Kok9d5c8242008-01-24 02:22:38 -08004392 /* need: 1 descriptor per page,
4393 * + 2 desc gap to keep tail from touching head,
4394 * + 1 desc for skb->data,
4395 * + 1 desc for context descriptor,
4396 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004397 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004398 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004399 return NETDEV_TX_BUSY;
4400 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004401
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004402 /* record the location of the first descriptor for this packet */
4403 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4404 first->skb = skb;
4405 first->bytecount = skb->len;
4406 first->gso_segs = 1;
4407
Matthew Vick3c89f6d2012-08-10 05:40:43 +00004408#ifdef CONFIG_IGB_PTP
Matthew Vick1f6e8172012-08-18 07:26:33 +00004409 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4410 !(adapter->ptp_tx_skb))) {
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004411 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004412 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Matthew Vick1f6e8172012-08-18 07:26:33 +00004413
4414 adapter->ptp_tx_skb = skb_get(skb);
4415 if (adapter->hw.mac.type == e1000_82576)
4416 schedule_work(&adapter->ptp_tx_work);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004417 }
Matthew Vick3c89f6d2012-08-10 05:40:43 +00004418#endif /* CONFIG_IGB_PTP */
Auke Kok9d5c8242008-01-24 02:22:38 -08004419
Jesse Grosseab6d182010-10-20 13:56:03 +00004420 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004421 tx_flags |= IGB_TX_FLAGS_VLAN;
4422 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4423 }
4424
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004425 /* record initial flags and protocol */
4426 first->tx_flags = tx_flags;
4427 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004428
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004429 tso = igb_tso(tx_ring, first, &hdr_len);
4430 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004431 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004432 else if (!tso)
4433 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004434
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004435 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004436
4437 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004438 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004439
Auke Kok9d5c8242008-01-24 02:22:38 -08004440 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004441
4442out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004443 igb_unmap_and_free_tx_resource(tx_ring, first);
4444
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004445 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004446}
4447
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004448static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4449 struct sk_buff *skb)
4450{
4451 unsigned int r_idx = skb->queue_mapping;
4452
4453 if (r_idx >= adapter->num_tx_queues)
4454 r_idx = r_idx % adapter->num_tx_queues;
4455
4456 return adapter->tx_ring[r_idx];
4457}
4458
Alexander Duyckcd392f52011-08-26 07:43:59 +00004459static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4460 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004461{
4462 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004463
4464 if (test_bit(__IGB_DOWN, &adapter->state)) {
4465 dev_kfree_skb_any(skb);
4466 return NETDEV_TX_OK;
4467 }
4468
4469 if (skb->len <= 0) {
4470 dev_kfree_skb_any(skb);
4471 return NETDEV_TX_OK;
4472 }
4473
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004474 /*
4475 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4476 * in order to meet this minimum size requirement.
4477 */
4478 if (skb->len < 17) {
4479 if (skb_padto(skb, 17))
4480 return NETDEV_TX_OK;
4481 skb->len = 17;
4482 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004483
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004484 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004485}
4486
4487/**
4488 * igb_tx_timeout - Respond to a Tx Hang
4489 * @netdev: network interface device structure
4490 **/
4491static void igb_tx_timeout(struct net_device *netdev)
4492{
4493 struct igb_adapter *adapter = netdev_priv(netdev);
4494 struct e1000_hw *hw = &adapter->hw;
4495
4496 /* Do the reset outside of interrupt context */
4497 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004498
Alexander Duyck06218a82011-08-26 07:46:55 +00004499 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00004500 hw->dev_spec._82575.global_device_reset = true;
4501
Auke Kok9d5c8242008-01-24 02:22:38 -08004502 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004503 wr32(E1000_EICS,
4504 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004505}
4506
4507static void igb_reset_task(struct work_struct *work)
4508{
4509 struct igb_adapter *adapter;
4510 adapter = container_of(work, struct igb_adapter, reset_task);
4511
Taku Izumic97ec422010-04-27 14:39:30 +00004512 igb_dump(adapter);
4513 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004514 igb_reinit_locked(adapter);
4515}
4516
4517/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004518 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004519 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004520 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004521 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004522 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004523static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4524 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004525{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004526 struct igb_adapter *adapter = netdev_priv(netdev);
4527
4528 spin_lock(&adapter->stats64_lock);
4529 igb_update_stats(adapter, &adapter->stats64);
4530 memcpy(stats, &adapter->stats64, sizeof(*stats));
4531 spin_unlock(&adapter->stats64_lock);
4532
4533 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004534}
4535
4536/**
4537 * igb_change_mtu - Change the Maximum Transfer Unit
4538 * @netdev: network interface device structure
4539 * @new_mtu: new value for maximum frame size
4540 *
4541 * Returns 0 on success, negative on failure
4542 **/
4543static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4544{
4545 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004546 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004547 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004548
Alexander Duyckc809d222009-10-27 23:52:13 +00004549 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004550 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004551 return -EINVAL;
4552 }
4553
Alexander Duyck153285f2011-08-26 07:43:32 +00004554#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004555 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004556 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004557 return -EINVAL;
4558 }
4559
4560 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4561 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004562
Auke Kok9d5c8242008-01-24 02:22:38 -08004563 /* igb_down has a dependency on max_frame_size */
4564 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004565
Alexander Duyck4c844852009-10-27 15:52:07 +00004566 if (netif_running(netdev))
4567 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004568
Alexander Duyck090b1792009-10-27 23:51:55 +00004569 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004570 netdev->mtu, new_mtu);
4571 netdev->mtu = new_mtu;
4572
4573 if (netif_running(netdev))
4574 igb_up(adapter);
4575 else
4576 igb_reset(adapter);
4577
4578 clear_bit(__IGB_RESETTING, &adapter->state);
4579
4580 return 0;
4581}
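
/*
 * Illustrative arithmetic for the checks above: a standard 1500-byte MTU
 * gives max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
 * = 1522 bytes, and the largest MTU that passes the
 * MAX_STD_JUMBO_FRAME_SIZE test is 9238 - 22 = 9216, matching the
 * "MTU > 9216" message.
 */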
4582
4583/**
4584 * igb_update_stats - Update the board statistics counters
4585 * @adapter: board private structure
4586 **/
4587
Eric Dumazet12dcd862010-10-15 17:27:10 +00004588void igb_update_stats(struct igb_adapter *adapter,
4589 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004590{
4591 struct e1000_hw *hw = &adapter->hw;
4592 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004593 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004594 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004595 int i;
4596 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004597 unsigned int start;
4598 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004599
4600#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4601
4602 /*
4603 * Prevent stats update while adapter is being reset, or if the pci
4604 * connection is down.
4605 */
4606 if (adapter->link_speed == 0)
4607 return;
4608 if (pci_channel_offline(pdev))
4609 return;
4610
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004611 bytes = 0;
4612 packets = 0;
4613 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyckae1c07a2012-08-08 05:23:22 +00004614 u32 rqdpc = rd32(E1000_RQDPC(i));
Alexander Duyck3025a442010-02-17 01:02:39 +00004615 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004616
Alexander Duyckae1c07a2012-08-08 05:23:22 +00004617 if (rqdpc) {
4618 ring->rx_stats.drops += rqdpc;
4619 net_stats->rx_fifo_errors += rqdpc;
4620 }
Eric Dumazet12dcd862010-10-15 17:27:10 +00004621
4622 do {
4623 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4624 _bytes = ring->rx_stats.bytes;
4625 _packets = ring->rx_stats.packets;
4626 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4627 bytes += _bytes;
4628 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004629 }
4630
Alexander Duyck128e45e2009-11-12 18:37:38 +00004631 net_stats->rx_bytes = bytes;
4632 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004633
4634 bytes = 0;
4635 packets = 0;
4636 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004637 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004638 do {
4639 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4640 _bytes = ring->tx_stats.bytes;
4641 _packets = ring->tx_stats.packets;
4642 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4643 bytes += _bytes;
4644 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004645 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004646 net_stats->tx_bytes = bytes;
4647 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004648
4649 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004650 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4651 adapter->stats.gprc += rd32(E1000_GPRC);
4652 adapter->stats.gorc += rd32(E1000_GORCL);
4653 rd32(E1000_GORCH); /* clear GORCL */
4654 adapter->stats.bprc += rd32(E1000_BPRC);
4655 adapter->stats.mprc += rd32(E1000_MPRC);
4656 adapter->stats.roc += rd32(E1000_ROC);
4657
4658 adapter->stats.prc64 += rd32(E1000_PRC64);
4659 adapter->stats.prc127 += rd32(E1000_PRC127);
4660 adapter->stats.prc255 += rd32(E1000_PRC255);
4661 adapter->stats.prc511 += rd32(E1000_PRC511);
4662 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4663 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4664 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4665 adapter->stats.sec += rd32(E1000_SEC);
4666
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004667 mpc = rd32(E1000_MPC);
4668 adapter->stats.mpc += mpc;
4669 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004670 adapter->stats.scc += rd32(E1000_SCC);
4671 adapter->stats.ecol += rd32(E1000_ECOL);
4672 adapter->stats.mcc += rd32(E1000_MCC);
4673 adapter->stats.latecol += rd32(E1000_LATECOL);
4674 adapter->stats.dc += rd32(E1000_DC);
4675 adapter->stats.rlec += rd32(E1000_RLEC);
4676 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4677 adapter->stats.xontxc += rd32(E1000_XONTXC);
4678 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4679 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4680 adapter->stats.fcruc += rd32(E1000_FCRUC);
4681 adapter->stats.gptc += rd32(E1000_GPTC);
4682 adapter->stats.gotc += rd32(E1000_GOTCL);
4683 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004684 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004685 adapter->stats.ruc += rd32(E1000_RUC);
4686 adapter->stats.rfc += rd32(E1000_RFC);
4687 adapter->stats.rjc += rd32(E1000_RJC);
4688 adapter->stats.tor += rd32(E1000_TORH);
4689 adapter->stats.tot += rd32(E1000_TOTH);
4690 adapter->stats.tpr += rd32(E1000_TPR);
4691
4692 adapter->stats.ptc64 += rd32(E1000_PTC64);
4693 adapter->stats.ptc127 += rd32(E1000_PTC127);
4694 adapter->stats.ptc255 += rd32(E1000_PTC255);
4695 adapter->stats.ptc511 += rd32(E1000_PTC511);
4696 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4697 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4698
4699 adapter->stats.mptc += rd32(E1000_MPTC);
4700 adapter->stats.bptc += rd32(E1000_BPTC);
4701
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004702 adapter->stats.tpt += rd32(E1000_TPT);
4703 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004704
4705 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004706 /* read internal phy specific stats */
4707 reg = rd32(E1000_CTRL_EXT);
4708 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4709 adapter->stats.rxerrc += rd32(E1000_RXERRC);
Carolyn Wyborny3dbdf962012-09-12 04:36:24 +00004710
4711 /* this stat has invalid values on i210/i211 */
4712 if ((hw->mac.type != e1000_i210) &&
4713 (hw->mac.type != e1000_i211))
4714 adapter->stats.tncrs += rd32(E1000_TNCRS);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004715 }
4716
Auke Kok9d5c8242008-01-24 02:22:38 -08004717 adapter->stats.tsctc += rd32(E1000_TSCTC);
4718 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4719
4720 adapter->stats.iac += rd32(E1000_IAC);
4721 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4722 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4723 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4724 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4725 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4726 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4727 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4728 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4729
4730 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004731 net_stats->multicast = adapter->stats.mprc;
4732 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004733
4734 /* Rx Errors */
4735
4736 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004737 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004738 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004739 adapter->stats.crcerrs + adapter->stats.algnerrc +
4740 adapter->stats.ruc + adapter->stats.roc +
4741 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004742 net_stats->rx_length_errors = adapter->stats.ruc +
4743 adapter->stats.roc;
4744 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4745 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4746 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004747
4748 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004749 net_stats->tx_errors = adapter->stats.ecol +
4750 adapter->stats.latecol;
4751 net_stats->tx_aborted_errors = adapter->stats.ecol;
4752 net_stats->tx_window_errors = adapter->stats.latecol;
4753 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004754
4755 /* Tx Dropped needs to be maintained elsewhere */
4756
4757 /* Phy Stats */
4758 if (hw->phy.media_type == e1000_media_type_copper) {
4759 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004760 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004761 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4762 adapter->phy_stats.idle_errors += phy_tmp;
4763 }
4764 }
4765
4766 /* Management Stats */
4767 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4768 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4769 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004770
4771 /* OS2BMC Stats */
4772 reg = rd32(E1000_MANC);
4773 if (reg & E1000_MANC_EN_BMC2OS) {
4774 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4775 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4776 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4777 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4778 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004779}
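
/*
 * Sketch of the writer side of the u64_stats pattern read above
 * (illustrative only; the real updates live in the Tx/Rx clean paths):
 */
static void igb_example_rx_stats_update(struct igb_ring *ring,
					unsigned int packets,
					unsigned int bytes)
{
	/* readers retry their fetch if it races with this update window */
	u64_stats_update_begin(&ring->rx_syncp);
	ring->rx_stats.packets += packets;
	ring->rx_stats.bytes += bytes;
	u64_stats_update_end(&ring->rx_syncp);
}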
4780
Auke Kok9d5c8242008-01-24 02:22:38 -08004781static irqreturn_t igb_msix_other(int irq, void *data)
4782{
Alexander Duyck047e0032009-10-27 15:49:27 +00004783 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004784 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004785 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004786 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004787
Alexander Duyck7f081d42010-01-07 17:41:00 +00004788 if (icr & E1000_ICR_DRSTA)
4789 schedule_work(&adapter->reset_task);
4790
Alexander Duyck047e0032009-10-27 15:49:27 +00004791 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004792 /* HW is reporting DMA is out of sync */
4793 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004794 /* The DMA Out of Sync is also an indication of a spoof event
4795 * in IOV mode. Check the Wrong VM Behavior register to
4796 * see if it is really a spoof event. */
4797 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004798 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004799
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004800 /* Check for a mailbox event */
4801 if (icr & E1000_ICR_VMMB)
4802 igb_msg_task(adapter);
4803
4804 if (icr & E1000_ICR_LSC) {
4805 hw->mac.get_link_status = 1;
4806 /* guard against interrupt when we're going down */
4807 if (!test_bit(__IGB_DOWN, &adapter->state))
4808 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4809 }
4810
Matthew Vick1f6e8172012-08-18 07:26:33 +00004811#ifdef CONFIG_IGB_PTP
4812 if (icr & E1000_ICR_TS) {
4813 u32 tsicr = rd32(E1000_TSICR);
4814
4815 if (tsicr & E1000_TSICR_TXTS) {
4816 /* acknowledge the interrupt */
4817 wr32(E1000_TSICR, E1000_TSICR_TXTS);
4818 /* retrieve hardware timestamp */
4819 schedule_work(&adapter->ptp_tx_work);
4820 }
4821 }
4822#endif /* CONFIG_IGB_PTP */
4823
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004824 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004825
4826 return IRQ_HANDLED;
4827}
4828
Alexander Duyck047e0032009-10-27 15:49:27 +00004829static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004830{
Alexander Duyck26b39272010-02-17 01:00:41 +00004831 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004832 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004833
Alexander Duyck047e0032009-10-27 15:49:27 +00004834 if (!q_vector->set_itr)
4835 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004836
Alexander Duyck047e0032009-10-27 15:49:27 +00004837 if (!itr_val)
4838 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004839
Alexander Duyck26b39272010-02-17 01:00:41 +00004840 if (adapter->hw.mac.type == e1000_82575)
4841 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004842 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004843 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004844
4845 writel(itr_val, q_vector->itr_register);
4846 q_vector->set_itr = 0;
4847}
4848
4849static irqreturn_t igb_msix_ring(int irq, void *data)
4850{
4851 struct igb_q_vector *q_vector = data;
4852
4853 /* Write the ITR value calculated from the previous interrupt. */
4854 igb_write_itr(q_vector);
4855
4856 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004857
Auke Kok9d5c8242008-01-24 02:22:38 -08004858 return IRQ_HANDLED;
4859}
4860
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004861#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004862static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004863{
Alexander Duyck047e0032009-10-27 15:49:27 +00004864 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004865 struct e1000_hw *hw = &adapter->hw;
4866 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004867
Alexander Duyck047e0032009-10-27 15:49:27 +00004868 if (q_vector->cpu == cpu)
4869 goto out_no_update;
4870
Alexander Duyck0ba82992011-08-26 07:45:47 +00004871 if (q_vector->tx.ring) {
4872 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004873 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4874 if (hw->mac.type == e1000_82575) {
4875 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4876 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4877 } else {
4878 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4879 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4880 E1000_DCA_TXCTRL_CPUID_SHIFT;
4881 }
4882 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4883 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4884 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004885 if (q_vector->rx.ring) {
4886 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004887 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4888 if (hw->mac.type == e1000_82575) {
4889 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4890 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4891 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004892 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004893 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004894 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004895 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004896 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4897 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4898 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4899 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004900 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004901 q_vector->cpu = cpu;
4902out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004903 put_cpu();
4904}
4905
4906static void igb_setup_dca(struct igb_adapter *adapter)
4907{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004908 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004909 int i;
4910
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004911 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004912 return;
4913
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004914 /* Always use CB2 mode, difference is masked in the CB driver. */
4915 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4916
Alexander Duyck047e0032009-10-27 15:49:27 +00004917 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004918 adapter->q_vector[i]->cpu = -1;
4919 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004920 }
4921}
4922
4923static int __igb_notify_dca(struct device *dev, void *data)
4924{
4925 struct net_device *netdev = dev_get_drvdata(dev);
4926 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004927 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004928 struct e1000_hw *hw = &adapter->hw;
4929 unsigned long event = *(unsigned long *)data;
4930
4931 switch (event) {
4932 case DCA_PROVIDER_ADD:
4933 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004934 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004935 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004936 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004937 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004938 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004939 igb_setup_dca(adapter);
4940 break;
4941 }
4942 /* Fall Through since DCA is disabled. */
4943 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004944 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004945 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004946 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004947 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004948 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004949 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004950 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004951 }
4952 break;
4953 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004954
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004955 return 0;
4956}
4957
4958static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4959 void *p)
4960{
4961 int ret_val;
4962
4963 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4964 __igb_notify_dca);
4965
4966 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4967}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004968#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];

	eth_random_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	return 0;
}

static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *vfdev;
	int dev_id;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		dev_id = IGB_82576_VF_DEV_ID;
		break;
	case e1000_i350:
		dev_id = IGB_I350_VF_DEV_ID;
		break;
	default:
		return false;
	}

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && vfdev->physfn == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) {
				/* drop the reference pci_get_device() took */
				pci_dev_put(vfdev);
				return true;
			}
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
	}

	return false;
}
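
/* Note on the scan above: pci_get_device() takes a reference on the device
 * it returns and drops the reference on the device passed in, so walking the
 * list this way does not leak references for non-matching VFs. A hedged
 * sketch of the same idiom for any vendor/device pair:
 *
 *	struct pci_dev *dev = NULL;
 *
 *	while ((dev = pci_get_device(vendor, device, dev)) != NULL) {
 *		if (interesting(dev)) {		// hypothetical predicate
 *			pci_dev_put(dev);	// keep the refcount balanced
 *			break;
 *		}
 *	}
 */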

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* extract the vid before clearing the entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled. Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}
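
/* For reference (a sketch of our reading of the register layout, not
 * authoritative): each VLVF entry packs the VLAN id in the low bits, a
 * per-pool membership bitmap starting at E1000_VLVF_POOLSEL_SHIFT, and an
 * enable bit, so building an entry for a given vid and pool looks like:
 *
 *	u32 entry = (vid & E1000_VLVF_VLANID_MASK) |
 *		    E1000_VLVF_VLANID_ENABLE |
 *		    (1 << (E1000_VLVF_POOLSEL_SHIFT + pool));
 */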

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}
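
/* This ndo is reached via the RTM_SETLINK path; from user space an
 * administrator drives it with iproute2, e.g. (illustrative device name):
 *
 *	ip link set eth0 vf 0 vlan 100 qos 3	# tag VF 0 traffic with VID 100
 *	ip link set eth0 vf 0 vlan 0		# remove the port VLAN again
 */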

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		eth_random_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, ETH_ALEN);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\n"
				 "Reload the VF driver to resume operations\n",
				 vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\n"
				 "Reload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
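
/* A rough sketch of the PF/VF mailbox handshake handled above (our summary,
 * not a normative description): the VF starts with E1000_VF_RESET; the PF
 * replies with ACK plus the VF MAC address and marks the VF clear-to-send
 * (CTS). Only then are configuration requests honored:
 *
 *	VF					PF (igb_msg_task)
 *	E1000_VF_RESET		------>		igb_vf_reset_msg()
 *				<------		E1000_VF_RESET | ACK + MAC
 *	E1000_VF_SET_MAC_ADDR	------>		igb_set_vf_mac_addr()
 *				<------		msg | ACK (or NACK on error)
 */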

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to limitations in the hardware, it is necessary to set all
 * the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous enable bit
 * to allow VLAN tag stripping when promiscuous mode is enabled.
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

#ifdef CONFIG_IGB_PTP
	if (icr & E1000_ICR_TS) {
		u32 tsicr = rd32(E1000_TSICR);

		if (tsicr & E1000_TSICR_TXTS) {
			/* acknowledge the interrupt */
			wr32(E1000_TSICR, E1000_TSICR_TXTS);
			/* retrieve hardware timestamp */
			schedule_work(&adapter->ptp_tx_work);
		}
	}
#endif /* CONFIG_IGB_PTP */

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

#ifdef CONFIG_IGB_PTP
	if (icr & E1000_ICR_TS) {
		u32 tsicr = rd32(E1000_TSICR);

		if (tsicr & E1000_TSICR_TXTS) {
			/* acknowledge the interrupt */
			wr32(E1000_TSICR, E1000_TSICR_TXTS);
			/* retrieve hardware timestamp */
			schedule_work(&adapter->ptp_tx_work);
		}
	}
#endif /* CONFIG_IGB_PTP */

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If all work is done, exit polling mode and re-enable interrupts */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}

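/* For context: this poll routine is attached to each q_vector's napi
 * instance when the vectors are allocated elsewhere in this driver, along
 * the lines of (sketch; 64 is the conventional NAPI weight used here):
 *
 *	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
 *
 * napi_schedule() in the interrupt handlers above then arranges for
 * igb_poll() to run in softirq context with the vector's interrupt masked.
 */
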
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	for (; budget; budget--) {
		eop_desc = tx_buffer->next_to_watch;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);
		tx_buffer->skb = NULL;

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 tx_buffer->dma,
				 tx_buffer->length,
				 DMA_TO_DEVICE);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer->dma = 0;

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (tx_buffer->dma) {
				dma_unmap_page(tx_ring->dev,
					       tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			}
		}

		/* clear last DMA location */
		tx_buffer->dma = 0;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		eop_desc = tx_buffer->next_to_watch;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (eop_desc &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				eop_desc,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

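/* Note: netdev_tx_completed_queue() above is the completion half of byte
 * queue limits (BQL); it only works because the transmit path reports bytes
 * queued with the matching call (sketch of the pairing, assuming the frames
 * are posted from the driver's tx-map routine):
 *
 *	netdev_tx_sent_queue(txring_txq(tx_ring), skb->len);	// xmit side
 *	netdev_tx_completed_queue(txring_txq(tx_ring),
 *				  total_packets, total_bytes);	// clean side
 */
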
static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

static void igb_rx_vlan(struct igb_ring *ring,
			union e1000_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;
		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, vid);
	}
}

static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger. In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IGB_RX_HDR_LEN)
		hlen = IGB_RX_HDR_LEN;
	return hlen;
}

static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	union e1000_adv_rx_desc *rx_desc;
	const int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	u16 i = rx_ring->next_to_clean;

	rx_desc = IGB_RX_DESC(rx_ring, i);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		struct sk_buff *skb = buffer_info->skb;
		union e1000_adv_rx_desc *next_rxd;

		buffer_info->skb = NULL;
		prefetch(skb->data);

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IGB_RX_DESC(rx_ring, i);
		prefetch(next_rxd);

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		if (!skb_is_nonlinear(skb)) {
			__skb_put(skb, igb_get_hlen(rx_desc));
			dma_unmap_single(rx_ring->dev, buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (rx_desc->wb.upper.length) {
			u16 length = le16_to_cpu(rx_desc->wb.upper.length);

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}

		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
			struct igb_rx_buffer *next_buffer;
			next_buffer = &rx_ring->rx_buffer_info[i];
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (unlikely((igb_test_staterr(rx_desc,
					       E1000_RXDEXT_ERR_FRAME_ERR_MASK))
			     && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

#ifdef CONFIG_IGB_PTP
		igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
#endif /* CONFIG_IGB_PTP */
		igb_rx_hash(rx_ring, rx_desc, skb);
		igb_rx_checksum(rx_ring, rx_desc, skb);
		igb_rx_vlan(rx_ring, rx_desc, skb);

		total_bytes += skb->len;
		total_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		napi_gro_receive(&q_vector->napi, skb);

		budget--;
next_desc:
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return !!budget;
}

static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
				 struct igb_rx_buffer *bi)
{
	struct sk_buff *skb = bi->skb;
	dma_addr_t dma = bi->dma;

	if (dma)
		return true;

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IGB_RX_HDR_LEN);
		bi->skb = skb;
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}

		/* initialize skb for ring */
		skb_record_rx_queue(skb, rx_ring->queue_index);
	}

	dma = dma_map_single(rx_ring->dev, skb->data,
			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	return true;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t page_dma = bi->page_dma;
	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);

	if (page_dma)
		return true;

	if (!page) {
		page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
		bi->page = page;
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}
	}

	page_dma = dma_map_page(rx_ring->dev, page,
				page_offset, PAGE_SIZE / 2,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, page_dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->page_dma = page_dma;
	bi->page_offset = page_offset;
	return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring on which to replenish receive buffers
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info. */
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;
	}

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the MII data
 * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

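/* From user space these ioctls are reachable through any AF_INET socket; a
 * hedged sketch of reading PHY register 1 (MII_BMSR) the way mii-tool does,
 * with "eth0" as an example interface name:
 *
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = 1;		// MII_BMSR
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result in mii->val_out
 */
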
/**
 * igb_ioctl - handle device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
#ifdef CONFIG_IGB_PTP
	case SIOCSHWTSTAMP:
		return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
#endif /* CONFIG_IGB_PTP */
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

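/* Usage is simply (sketch with a standard PCIe capability register;
 * PCI_EXP_LNKSTA and its field masks are defined in <linux/pci_regs.h>):
 *
 *	u16 lnksta;
 *
 *	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_LNKSTA, &lnksta))
 *		link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
 *
 * Newer kernels wrap the same cap_offset arithmetic as
 * pcie_capability_read_word()/pcie_capability_write_word().
 */
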
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006252static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006253{
6254 struct igb_adapter *adapter = netdev_priv(netdev);
6255 struct e1000_hw *hw = &adapter->hw;
6256 u32 ctrl, rctl;
Alexander Duyck5faf0302011-08-26 07:46:08 +00006257 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08006258
Alexander Duyck5faf0302011-08-26 07:46:08 +00006259 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006260 /* enable VLAN tag insert/strip */
6261 ctrl = rd32(E1000_CTRL);
6262 ctrl |= E1000_CTRL_VME;
6263 wr32(E1000_CTRL, ctrl);
6264
Alexander Duyck51466232009-10-27 23:47:35 +00006265 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006266 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006267 rctl &= ~E1000_RCTL_CFIEN;
6268 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006269 } else {
6270 /* disable VLAN tag insert/strip */
6271 ctrl = rd32(E1000_CTRL);
6272 ctrl &= ~E1000_CTRL_VME;
6273 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006274 }
6275
Alexander Duycke1739522009-02-19 20:39:44 -08006276 igb_rlpml_set(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006277}

static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;
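
	/* The encoding trick: SPEED_10/100/1000 are the even values
	 * 10/100/1000 and DUPLEX_HALF/FULL are 0/1, so once the check
	 * above passes, spd + dplx maps every valid pair to a unique
	 * case label below, e.g. SPEED_100 + DUPLEX_FULL = 101.
	 */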

	/* Fiber NICs only allow 1000 Mbps Full duplex; reject the setting
	 * if either the speed or the duplex deviates from that */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = __igb_shutdown(pdev, &wake, 0);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static int igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;	/* int, not u32: holds negative errno values */

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netdev->flags & IFF_UP) {
		err = __igb_open(netdev, true);
		if (err)
			return err;
	}

	netif_device_attach(netdev);
	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake, 1);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->msix_entries)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
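
	/* e.g. for MAC 00:11:22:33:44:55 this packs to rar_low 0x33221100
	 * and rar_high 0x00005544, before the flag bits below are OR'd in
	 */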

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and move
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}
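
	/* Worked example, assuming E1000_RTTBCNRC_RF_INT_SHIFT is 14:
	 * at link_speed 1000 and tx_rate 300 the code above yields
	 * rf_int = 3 and rf_dec = (100 * 16384) / 300 = 5461, i.e. a rate
	 * factor of 3 + 5461/16384 ~= 3.333 = link_speed / tx_rate.
	 */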

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/*
	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(E1000_RTTBCNRM, 0x14);
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/*
			 * DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, but no lower than PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
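
			/* e.g. with a 34KB PBA and 9728-byte jumbo frames:
			 * 64 * 34 - 9728 / 16 = 1568, which is below the
			 * 64 * (34 - 6) = 1792 floor, so hwm becomes 1792
			 */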
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/*
			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, but no lower than PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
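
			/* pba is in KB here, so max_frame_size / 512 is
			 * 2 * max frame size expressed in KB; with a 34KB
			 * PBA and 9728-byte jumbos: 34 - 19 = 15, raised
			 * to the 34 - 10 = 24 floor
			 */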
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available.. */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = +-1000 usec in 32 usec intervals */
			reg |= (1000 >> 5);

			/* Disable BMC-to-OS Watchdog Enable */
			reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
			wr32(E1000_DMACR, reg);

			/*
			 * no lower threshold to disable
			 * coalescing (smart fifo) - UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/*
			 * free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/*
			 * make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}

/* igb_main.c */