/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 0
#define BUILD 3
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
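/* With the values above, DRV_VERSION expands to the string "5.0.3-k". */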
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
				"Copyright (c) 2007-2013 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						page_address(buffer_info->page) +
							     buffer_info->page_offset,
						IGB_RX_BUFSZ, true);
				}
			}
		}
	}

exit:
	return;
}


/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: pointer to hardware structure (struct igb_adapter)
 *
 *  Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return ((i2cctl & E1000_I2C_DATA_IN) != 0);
}

/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: pointer to hardware structure
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();

}

/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: pointer to hardware structure
 *  @state: state to set clock
 *
 *  Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: pointer to hardware structure
 *
 *  Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return ((i2cctl & E1000_I2C_CLK_IN) != 0);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
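
/* These callbacks are consumed by the kernel's generic i2c-algo-bit layer,
 * which bit-bangs I2C transactions by toggling SDA/SCL through the
 * I2CPARAMS register.  With .udelay = 5 (the half-cycle time in
 * microseconds) the bus runs at roughly 100 kHz; the i2c_adapter that uses
 * this algorithm is registered elsewhere in the driver.
 */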

/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded.  All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
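/* Illustration: Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1,
 * Q_IDX_82576(3) = 9, i.e. even indices land in the low half of the queue
 * space and odd indices in the high half, matching the VF 0 -> {0, 8},
 * VF 1 -> {1, 9} pairing described in igb_cache_ring_register() below.
 */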
/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset in IVAR, should be a multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing a cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

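/* For example, on 82580 and later (see igb_assign_vector() below) routing
 * Rx queue 5 to MSI-X vector 3 becomes igb_write_ivar(hw, 3, 5 >> 1,
 * (5 & 0x1) << 4), i.e. row 2 of the IVAR table at column offset 16.
 */
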
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure to initialize
 *
 *  igb_configure_msix sets up the hardware to properly
 *  generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure to initialize
 *
 *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 *  kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

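		/* Example: on an interface named "eth0" (name purely
		 * illustrative), the vectors requested here show up in
		 * /proc/interrupts as "eth0" for the other/link vector and
		 * "eth0-TxRx-0", "eth0-TxRx-1", ... for the queue vectors
		 * when queue pairing is enabled.
		 */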
		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be freed
 *
 *  This function frees the memory allocated to the q_vector.  In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  This function frees the memory allocated to the q_vectors.  In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);
}

/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure to initialize
 *
 *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 *  MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  Attempt to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
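	/* Worked example (assuming no VFs): with 4 RSS queues and queue
	 * pairing enabled this requests 4 queue vectors + 1 link vector
	 * = 5 MSI-X entries; with pairing disabled it would be 8 + 1 = 9.
	 */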
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

1144/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001145 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1146 * @adapter: board private structure to initialize
1147 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1148 * @v_idx: index of vector in adapter struct
1149 * @txr_count: total number of Tx rings to allocate
1150 * @txr_idx: index of first Tx ring to allocate
1151 * @rxr_count: total number of Rx rings to allocate
1152 * @rxr_idx: index of first Rx ring to allocate
Alexander Duyck5536d212012-09-25 00:31:17 +00001153 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001154 * We allocate one q_vector. If allocation fails we return -ENOMEM.
Alexander Duyck5536d212012-09-25 00:31:17 +00001155 **/
1156static int igb_alloc_q_vector(struct igb_adapter *adapter,
1157 int v_count, int v_idx,
1158 int txr_count, int txr_idx,
1159 int rxr_count, int rxr_idx)
1160{
1161 struct igb_q_vector *q_vector;
1162 struct igb_ring *ring;
1163 int ring_count, size;
1164
1165 /* igb only supports 1 Tx and/or 1 Rx queue per vector */
1166 if (txr_count > 1 || rxr_count > 1)
1167 return -ENOMEM;
1168
1169 ring_count = txr_count + rxr_count;
1170 size = sizeof(struct igb_q_vector) +
1171 (sizeof(struct igb_ring) * ring_count);
1172
1173 /* allocate q_vector and rings */
1174 q_vector = kzalloc(size, GFP_KERNEL);
1175 if (!q_vector)
1176 return -ENOMEM;
1177
1178 /* initialize NAPI */
1179 netif_napi_add(adapter->netdev, &q_vector->napi,
1180 igb_poll, 64);
1181
1182 /* tie q_vector and adapter together */
1183 adapter->q_vector[v_idx] = q_vector;
1184 q_vector->adapter = adapter;
1185
1186 /* initialize work limits */
1187 q_vector->tx.work_limit = adapter->tx_work_limit;
1188
1189 /* initialize ITR configuration */
1190 q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
1191 q_vector->itr_val = IGB_START_ITR;
1192
1193 /* initialize pointer to rings */
1194 ring = q_vector->ring;
1195
Alexander Duyck4e2276672013-02-12 02:31:01 +00001196 /* initialize ITR */
1197 if (rxr_count) {
1198 /* rx or rx/tx vector */
1199 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1200 q_vector->itr_val = adapter->rx_itr_setting;
1201 } else {
1202 /* tx only vector */
1203 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1204 q_vector->itr_val = adapter->tx_itr_setting;
1205 }
1206
Alexander Duyck5536d212012-09-25 00:31:17 +00001207 if (txr_count) {
1208 /* assign generic ring traits */
1209 ring->dev = &adapter->pdev->dev;
1210 ring->netdev = adapter->netdev;
1211
1212 /* configure backlink on ring */
1213 ring->q_vector = q_vector;
1214
1215 /* update q_vector Tx values */
1216 igb_add_ring(ring, &q_vector->tx);
1217
1218 /* For 82575, context index must be unique per ring. */
1219 if (adapter->hw.mac.type == e1000_82575)
1220 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1221
1222 /* apply Tx specific ring traits */
1223 ring->count = adapter->tx_ring_count;
1224 ring->queue_index = txr_idx;
1225
1226 /* assign ring to adapter */
1227 adapter->tx_ring[txr_idx] = ring;
1228
1229 /* push pointer to next ring */
1230 ring++;
1231 }
1232
1233 if (rxr_count) {
1234 /* assign generic ring traits */
1235 ring->dev = &adapter->pdev->dev;
1236 ring->netdev = adapter->netdev;
1237
1238 /* configure backlink on ring */
1239 ring->q_vector = q_vector;
1240
1241 /* update q_vector Rx values */
1242 igb_add_ring(ring, &q_vector->rx);
1243
1244 /* set flag indicating ring supports SCTP checksum offload */
1245 if (adapter->hw.mac.type >= e1000_82576)
1246 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1247
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00001248 /*
1249 * On i350, i354, i210, and i211, loopback VLAN packets
Alexander Duyck5536d212012-09-25 00:31:17 +00001250 * have the tag byte-swapped.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001251 */
Alexander Duyck5536d212012-09-25 00:31:17 +00001252 if (adapter->hw.mac.type >= e1000_i350)
1253 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1254
1255 /* apply Rx specific ring traits */
1256 ring->count = adapter->rx_ring_count;
1257 ring->queue_index = rxr_idx;
1258
1259 /* assign ring to adapter */
1260 adapter->rx_ring[rxr_idx] = ring;
1261 }
1262
1263 return 0;
1264}
1265
Auke Kok9d5c8242008-01-24 02:22:38 -08001267/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001268 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1269 * @adapter: board private structure to initialize
Alexander Duyck047e0032009-10-27 15:49:27 +00001270 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001271 * We allocate one q_vector per queue interrupt. If allocation fails we
1272 * return -ENOMEM.
Alexander Duyck047e0032009-10-27 15:49:27 +00001273 **/
1274static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1275{
Alexander Duyck5536d212012-09-25 00:31:17 +00001276 int q_vectors = adapter->num_q_vectors;
1277 int rxr_remaining = adapter->num_rx_queues;
1278 int txr_remaining = adapter->num_tx_queues;
1279 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1280 int err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001281
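	/* if there are at least as many vectors as rings, give each Rx ring
	 * a dedicated Rx-only vector before assigning the Tx rings
	 */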
Alexander Duyck5536d212012-09-25 00:31:17 +00001282 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1283 for (; rxr_remaining; v_idx++) {
1284 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1285 0, 0, 1, rxr_idx);
1286
1287 if (err)
1288 goto err_out;
1289
1290 /* update counts and index */
1291 rxr_remaining--;
1292 rxr_idx++;
1293 }
1294 }
1295
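	/* distribute the remaining Tx and Rx rings across the remaining
	 * vectors, sharing a vector between a Tx and an Rx ring where needed
	 */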
1296 for (; v_idx < q_vectors; v_idx++) {
1297 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1298 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1299 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1300 tqpv, txr_idx, rqpv, rxr_idx);
1301
1302 if (err)
Alexander Duyck047e0032009-10-27 15:49:27 +00001303 goto err_out;
Alexander Duyck5536d212012-09-25 00:31:17 +00001304
1305 /* update counts and index */
1306 rxr_remaining -= rqpv;
1307 txr_remaining -= tqpv;
1308 rxr_idx++;
1309 txr_idx++;
Alexander Duyck047e0032009-10-27 15:49:27 +00001310 }
Alexander Duyck81c2fc22011-08-26 07:45:20 +00001311
Alexander Duyck047e0032009-10-27 15:49:27 +00001312 return 0;
1313
1314err_out:
Alexander Duyck5536d212012-09-25 00:31:17 +00001315 adapter->num_tx_queues = 0;
1316 adapter->num_rx_queues = 0;
1317 adapter->num_q_vectors = 0;
1318
1319 while (v_idx--)
1320 igb_free_q_vector(adapter, v_idx);
1321
Alexander Duyck047e0032009-10-27 15:49:27 +00001322 return -ENOMEM;
1323}
1324
Alexander Duyck047e0032009-10-27 15:49:27 +00001325/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001326 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1327 * @adapter: board private structure to initialize
1328 * @msix: boolean value of MSIX capability
Alexander Duyck047e0032009-10-27 15:49:27 +00001329 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001330 * This function initializes the interrupts and allocates all of the queues.
Alexander Duyck047e0032009-10-27 15:49:27 +00001331 **/
Stefan Assmann53c7d062012-12-04 06:00:12 +00001332static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
Alexander Duyck047e0032009-10-27 15:49:27 +00001333{
1334 struct pci_dev *pdev = adapter->pdev;
1335 int err;
1336
Stefan Assmann53c7d062012-12-04 06:00:12 +00001337 igb_set_interrupt_capability(adapter, msix);
Alexander Duyck047e0032009-10-27 15:49:27 +00001338
1339 err = igb_alloc_q_vectors(adapter);
1340 if (err) {
1341 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1342 goto err_alloc_q_vectors;
1343 }
1344
Alexander Duyck5536d212012-09-25 00:31:17 +00001345 igb_cache_ring_register(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001346
1347 return 0;
Alexander Duyck5536d212012-09-25 00:31:17 +00001348
Alexander Duyck047e0032009-10-27 15:49:27 +00001349err_alloc_q_vectors:
1350 igb_reset_interrupt_capability(adapter);
1351 return err;
1352}
1353
1354/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001355 * igb_request_irq - initialize interrupts
1356 * @adapter: board private structure to initialize
Auke Kok9d5c8242008-01-24 02:22:38 -08001357 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001358 * Attempts to configure interrupts using the best available
1359 * capabilities of the hardware and kernel.
Auke Kok9d5c8242008-01-24 02:22:38 -08001360 **/
1361static int igb_request_irq(struct igb_adapter *adapter)
1362{
1363 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001364 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001365 int err = 0;
1366
1367 if (adapter->msix_entries) {
1368 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001369 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001370 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001371 /* fall back to MSI */
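		/* queue resources were sized for the MSI-X layout, so free and
		 * rebuild them after switching to a single-vector scheme
		 */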
Alexander Duyck5536d212012-09-25 00:31:17 +00001372 igb_free_all_tx_resources(adapter);
1373 igb_free_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001374
Alexander Duyck047e0032009-10-27 15:49:27 +00001375 igb_clear_interrupt_scheme(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001376 err = igb_init_interrupt_scheme(adapter, false);
1377 if (err)
Alexander Duyck047e0032009-10-27 15:49:27 +00001378 goto request_done;
Stefan Assmann53c7d062012-12-04 06:00:12 +00001379
Alexander Duyck047e0032009-10-27 15:49:27 +00001380 igb_setup_all_tx_resources(adapter);
1381 igb_setup_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001382 igb_configure(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001383 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001384
Alexander Duyckc74d5882011-08-26 07:46:45 +00001385 igb_assign_vector(adapter->q_vector[0], 0);
1386
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001387 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001388 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001389 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001390 if (!err)
1391 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001392
Auke Kok9d5c8242008-01-24 02:22:38 -08001393 /* fall back to legacy interrupts */
1394 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001395 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001396 }
1397
Alexander Duyckc74d5882011-08-26 07:46:45 +00001398 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001399 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001400
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001401 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001402 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001403 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001404
1405request_done:
1406 return err;
1407}
1408
1409static void igb_free_irq(struct igb_adapter *adapter)
1410{
Auke Kok9d5c8242008-01-24 02:22:38 -08001411 if (adapter->msix_entries) {
1412 int vector = 0, i;
1413
Alexander Duyck047e0032009-10-27 15:49:27 +00001414 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001415
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001416 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001417 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001418 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001419 } else {
1420 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001421 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001422}
1423
1424/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001425 * igb_irq_disable - Mask off interrupt generation on the NIC
1426 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001427 **/
1428static void igb_irq_disable(struct igb_adapter *adapter)
1429{
1430 struct e1000_hw *hw = &adapter->hw;
1431
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001432 /* we need to be careful when disabling interrupts. The VFs are also
Alexander Duyck25568a52009-10-27 23:49:59 +00001433 * mapped into these registers and so clearing the bits can cause
1434 * issues on the VF drivers, so we only need to clear what we set
1435 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001436 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001437 u32 regval = rd32(E1000_EIAM);
1438 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1439 wr32(E1000_EIMC, adapter->eims_enable_mask);
1440 regval = rd32(E1000_EIAC);
1441 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001442 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001443
1444 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001445 wr32(E1000_IMC, ~0);
1446 wrfl();
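	/* wait for any interrupt handlers that are already running to finish */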
Emil Tantilov81a61852010-08-02 14:40:52 +00001447 if (adapter->msix_entries) {
1448 int i;
1449 for (i = 0; i < adapter->num_q_vectors; i++)
1450 synchronize_irq(adapter->msix_entries[i].vector);
1451 } else {
1452 synchronize_irq(adapter->pdev->irq);
1453 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001454}
1455
1456/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001457 * igb_irq_enable - Enable default interrupt generation settings
1458 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001459 **/
1460static void igb_irq_enable(struct igb_adapter *adapter)
1461{
1462 struct e1000_hw *hw = &adapter->hw;
1463
1464 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001465 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001466 u32 regval = rd32(E1000_EIAC);
1467 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1468 regval = rd32(E1000_EIAM);
1469 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001470 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001471 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001472 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001473 ims |= E1000_IMS_VMMB;
1474 }
1475 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001476 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001477 wr32(E1000_IMS, IMS_ENABLE_MASK |
1478 E1000_IMS_DRSTA);
1479 wr32(E1000_IAM, IMS_ENABLE_MASK |
1480 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001481 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001482}
1483
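/**
 * igb_update_mng_vlan - update the VLAN filter for the manageability VLAN
 * @adapter: board private structure
 *
 * Adds the current manageability VLAN ID to the VLAN filter table and
 * removes the previous ID when it is no longer in use.
 **/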
1484static void igb_update_mng_vlan(struct igb_adapter *adapter)
1485{
Alexander Duyck51466232009-10-27 23:47:35 +00001486 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001487 u16 vid = adapter->hw.mng_cookie.vlan_id;
1488 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001489
Alexander Duyck51466232009-10-27 23:47:35 +00001490 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1491 /* add VID to filter table */
1492 igb_vfta_set(hw, vid, true);
1493 adapter->mng_vlan_id = vid;
1494 } else {
1495 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1496 }
1497
1498 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1499 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001500 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001501 /* remove VID from filter table */
1502 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001503 }
1504}
1505
1506/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001507 * igb_release_hw_control - release control of the h/w to f/w
1508 * @adapter: address of board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001509 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001510 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1511 * For ASF and Pass Through versions of f/w this means that the
1512 * driver is no longer loaded.
Auke Kok9d5c8242008-01-24 02:22:38 -08001513 **/
1514static void igb_release_hw_control(struct igb_adapter *adapter)
1515{
1516 struct e1000_hw *hw = &adapter->hw;
1517 u32 ctrl_ext;
1518
1519 /* Let firmware take over control of h/w */
1520 ctrl_ext = rd32(E1000_CTRL_EXT);
1521 wr32(E1000_CTRL_EXT,
1522 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1523}
1524
Auke Kok9d5c8242008-01-24 02:22:38 -08001525/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001526 * igb_get_hw_control - get control of the h/w from f/w
1527 * @adapter: address of board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001528 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001529 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1530 * For ASF and Pass Through versions of f/w this means that
1531 * the driver is loaded.
Auke Kok9d5c8242008-01-24 02:22:38 -08001532 **/
1533static void igb_get_hw_control(struct igb_adapter *adapter)
1534{
1535 struct e1000_hw *hw = &adapter->hw;
1536 u32 ctrl_ext;
1537
1538 /* Let firmware know the driver has taken over */
1539 ctrl_ext = rd32(E1000_CTRL_EXT);
1540 wr32(E1000_CTRL_EXT,
1541 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1542}
1543
Auke Kok9d5c8242008-01-24 02:22:38 -08001544/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001545 * igb_configure - configure the hardware for RX and TX
1546 * @adapter: private board structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001547 **/
1548static void igb_configure(struct igb_adapter *adapter)
1549{
1550 struct net_device *netdev = adapter->netdev;
1551 int i;
1552
1553 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001554 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001555
1556 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001557
Alexander Duyck85b430b2009-10-27 15:50:29 +00001558 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001559 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001560 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001561
1562 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001563 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001564
1565 igb_rx_fifo_flush_82575(&adapter->hw);
1566
Alexander Duyckc493ea42009-03-20 00:16:50 +00001567 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001568 * at least 1 descriptor unused to make sure
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001569 * next_to_use != next_to_clean
1570 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001571 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001572 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001573 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001574 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001575}
1576
Nick Nunley88a268c2010-02-17 01:01:59 +00001577/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001578 * igb_power_up_link - Power up the phy/serdes link
1579 * @adapter: address of board private structure
Nick Nunley88a268c2010-02-17 01:01:59 +00001580 **/
1581void igb_power_up_link(struct igb_adapter *adapter)
1582{
Akeem G. Abodunrin76886592012-07-17 04:51:18 +00001583 igb_reset_phy(&adapter->hw);
1584
Nick Nunley88a268c2010-02-17 01:01:59 +00001585 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1586 igb_power_up_phy_copper(&adapter->hw);
1587 else
1588 igb_power_up_serdes_link_82575(&adapter->hw);
1589}
1590
1591/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001592 * igb_power_down_link - Power down the phy/serdes link
1593 * @adapter: address of board private structure
Nick Nunley88a268c2010-02-17 01:01:59 +00001594 */
1595static void igb_power_down_link(struct igb_adapter *adapter)
1596{
1597 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1598 igb_power_down_phy_copper_82575(&adapter->hw);
1599 else
1600 igb_shutdown_serdes_link_82575(&adapter->hw);
1601}
Auke Kok9d5c8242008-01-24 02:22:38 -08001602
1603/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001604 * igb_up - Open the interface and prepare it to handle traffic
1605 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001606 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001607int igb_up(struct igb_adapter *adapter)
1608{
1609 struct e1000_hw *hw = &adapter->hw;
1610 int i;
1611
1612 /* hardware has been reset, we need to reload some things */
1613 igb_configure(adapter);
1614
1615 clear_bit(__IGB_DOWN, &adapter->state);
1616
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001617 for (i = 0; i < adapter->num_q_vectors; i++)
1618 napi_enable(&(adapter->q_vector[i]->napi));
1619
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001620 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001621 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001622 else
1623 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001624
1625 /* Clear any pending interrupts. */
1626 rd32(E1000_ICR);
1627 igb_irq_enable(adapter);
1628
Alexander Duyckd4960302009-10-27 15:53:45 +00001629 /* notify VFs that reset has been completed */
1630 if (adapter->vfs_allocated_count) {
1631 u32 reg_data = rd32(E1000_CTRL_EXT);
1632 reg_data |= E1000_CTRL_EXT_PFRSTD;
1633 wr32(E1000_CTRL_EXT, reg_data);
1634 }
1635
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001636 netif_tx_start_all_queues(adapter->netdev);
1637
Alexander Duyck25568a52009-10-27 23:49:59 +00001638 /* start the watchdog. */
1639 hw->mac.get_link_status = 1;
1640 schedule_work(&adapter->watchdog_task);
1641
Auke Kok9d5c8242008-01-24 02:22:38 -08001642 return 0;
1643}
1644
1645void igb_down(struct igb_adapter *adapter)
1646{
Auke Kok9d5c8242008-01-24 02:22:38 -08001647 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001648 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001649 u32 tctl, rctl;
1650 int i;
1651
1652 /* signal that we're down so the interrupt handler does not
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001653 * reschedule our watchdog timer
1654 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001655 set_bit(__IGB_DOWN, &adapter->state);
1656
1657 /* disable receives in the hardware */
1658 rctl = rd32(E1000_RCTL);
1659 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1660 /* flush and sleep below */
1661
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001662 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001663
1664 /* disable transmits in the hardware */
1665 tctl = rd32(E1000_TCTL);
1666 tctl &= ~E1000_TCTL_EN;
1667 wr32(E1000_TCTL, tctl);
1668 /* flush both disables and wait for them to finish */
1669 wrfl();
1670 msleep(10);
1671
Auke Kok9d5c8242008-01-24 02:22:38 -08001672 igb_irq_disable(adapter);
1673
Carolyn Wyborny41f149a2013-04-30 00:21:32 +00001674 for (i = 0; i < adapter->num_q_vectors; i++) {
1675 napi_synchronize(&(adapter->q_vector[i]->napi));
1676 napi_disable(&(adapter->q_vector[i]->napi));
1677 }
1678
Auke Kok9d5c8242008-01-24 02:22:38 -08001680 del_timer_sync(&adapter->watchdog_timer);
1681 del_timer_sync(&adapter->phy_info_timer);
1682
Auke Kok9d5c8242008-01-24 02:22:38 -08001683 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001684
1685 /* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001686 spin_lock(&adapter->stats64_lock);
1687 igb_update_stats(adapter, &adapter->stats64);
1688 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001689
Auke Kok9d5c8242008-01-24 02:22:38 -08001690 adapter->link_speed = 0;
1691 adapter->link_duplex = 0;
1692
Jeff Kirsher30236822008-06-24 17:01:15 -07001693 if (!pci_channel_offline(adapter->pdev))
1694 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001695 igb_clean_all_tx_rings(adapter);
1696 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001697#ifdef CONFIG_IGB_DCA
1698
1699 /* since we reset the hardware DCA settings were cleared */
1700 igb_setup_dca(adapter);
1701#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001702}
1703
1704void igb_reinit_locked(struct igb_adapter *adapter)
1705{
1706 WARN_ON(in_interrupt());
1707 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1708 msleep(1);
1709 igb_down(adapter);
1710 igb_up(adapter);
1711 clear_bit(__IGB_RESETTING, &adapter->state);
1712}
1713
1714void igb_reset(struct igb_adapter *adapter)
1715{
Alexander Duyck090b1792009-10-27 23:51:55 +00001716 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001717 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001718 struct e1000_mac_info *mac = &hw->mac;
1719 struct e1000_fc_info *fc = &hw->fc;
Matthew Vickd48507f2012-11-08 04:03:58 +00001720 u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
Auke Kok9d5c8242008-01-24 02:22:38 -08001721
1722 /* Repartition Pba for greater than 9k mtu
1723 * To take effect CTRL.RST is required.
1724 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001725 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001726 case e1000_i350:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00001727 case e1000_i354:
Alexander Duyck55cac242009-11-19 12:42:21 +00001728 case e1000_82580:
1729 pba = rd32(E1000_RXPBS);
1730 pba = igb_rxpbs_adjust_82580(pba);
1731 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001732 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001733 pba = rd32(E1000_RXPBS);
1734 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001735 break;
1736 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001737 case e1000_i210:
1738 case e1000_i211:
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001739 default:
1740 pba = E1000_PBA_34K;
1741 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001742 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001743
Alexander Duyck2d064c02008-07-08 15:10:12 -07001744 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1745 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001746 /* adjust PBA for jumbo frames */
1747 wr32(E1000_PBA, pba);
1748
1749 /* To maintain wire speed transmits, the Tx FIFO should be
1750 * large enough to accommodate two full transmit packets,
1751 * rounded up to the next 1KB and expressed in KB. Likewise,
1752 * the Rx FIFO should be large enough to accommodate at least
1753 * one full receive packet and is similarly rounded up and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001754 * expressed in KB.
1755 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001756 pba = rd32(E1000_PBA);
1757 /* upper 16 bits has Tx packet buffer allocation size in KB */
1758 tx_space = pba >> 16;
1759 /* lower 16 bits has Rx packet buffer allocation size in KB */
1760 pba &= 0xffff;
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001761 /* the Tx FIFO also stores 16 bytes of information about the Tx,
 1762 * but we don't include the ethernet FCS because hardware appends it
1763 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001764 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001765 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001766 ETH_FCS_LEN) * 2;
1767 min_tx_space = ALIGN(min_tx_space, 1024);
1768 min_tx_space >>= 10;
1769 /* software strips receive CRC, so leave room for it */
1770 min_rx_space = adapter->max_frame_size;
1771 min_rx_space = ALIGN(min_rx_space, 1024);
1772 min_rx_space >>= 10;
1773
1774 /* If current Tx allocation is less than the min Tx FIFO size,
1775 * and the min Tx FIFO size is less than the current Rx FIFO
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001776 * allocation, take space away from current Rx allocation
1777 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001778 if (tx_space < min_tx_space &&
1779 ((min_tx_space - tx_space) < pba)) {
1780 pba = pba - (min_tx_space - tx_space);
1781
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001782 /* if short on Rx space, Rx wins and must trump Tx
1783 * adjustment
1784 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001785 if (pba < min_rx_space)
1786 pba = min_rx_space;
1787 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001788 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001789 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001790
1791 /* flow control settings */
1792 /* The high water mark must be low enough to fit one full frame
1793 * (or the size used for early receive) above it in the Rx FIFO.
1794 * Set it to the lower of:
1795 * - 90% of the Rx FIFO size, or
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001796 * - the full Rx FIFO size minus one full frame
1797 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001798 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001799 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001800
Matthew Vickd48507f2012-11-08 04:03:58 +00001801 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
Alexander Duyckd405ea32009-12-23 13:21:27 +00001802 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001803 fc->pause_time = 0xFFFF;
1804 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001805 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001806
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001807 /* disable receive for all VFs and wait one second */
1808 if (adapter->vfs_allocated_count) {
1809 int i;
1810 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001811 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001812
1813 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001814 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001815
1816 /* disable transmits and receives */
1817 wr32(E1000_VFRE, 0);
1818 wr32(E1000_VFTE, 0);
1819 }
1820
Auke Kok9d5c8242008-01-24 02:22:38 -08001821 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001822 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001823 wr32(E1000_WUC, 0);
1824
Alexander Duyck330a6d62009-10-27 23:51:35 +00001825 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001826 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001827
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001828 /* Flow control settings reset on hardware reset, so guarantee flow
Matthew Vicka27416b2012-04-18 02:57:44 +00001829 * control is off when forcing speed.
1830 */
1831 if (!hw->mac.autoneg)
1832 igb_force_mac_fc(hw);
1833
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001834 igb_init_dmac(adapter, pba);
Carolyn Wybornye4288932012-12-07 03:01:42 +00001835#ifdef CONFIG_IGB_HWMON
1836 /* Re-initialize the thermal sensor on i350 devices. */
1837 if (!test_bit(__IGB_DOWN, &adapter->state)) {
1838 if (mac->type == e1000_i350 && hw->bus.func == 0) {
1839 /* If present, re-initialize the external thermal sensor
1840 * interface.
1841 */
1842 if (adapter->ets)
1843 mac->ops.init_thermal_sensor_thresh(hw);
1844 }
1845 }
1846#endif
Nick Nunley88a268c2010-02-17 01:01:59 +00001847 if (!netif_running(adapter->netdev))
1848 igb_power_down_link(adapter);
1849
Auke Kok9d5c8242008-01-24 02:22:38 -08001850 igb_update_mng_vlan(adapter);
1851
1852 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1853 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1854
Matthew Vick1f6e8172012-08-18 07:26:33 +00001855 /* Re-enable PTP, where applicable. */
1856 igb_ptp_reset(adapter);
Matthew Vick1f6e8172012-08-18 07:26:33 +00001857
Alexander Duyck330a6d62009-10-27 23:51:35 +00001858 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001859}
1860
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001861static netdev_features_t igb_fix_features(struct net_device *netdev,
1862 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001863{
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001864 /* Since there is no support for separate Rx/Tx vlan accel
1865 * enable/disable, make sure the Tx flag is always in the same state as Rx.
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001866 */
Patrick McHardyf6469682013-04-19 02:04:27 +00001867 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1868 features |= NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001869 else
Patrick McHardyf6469682013-04-19 02:04:27 +00001870 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001871
1872 return features;
1873}
1874
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001875static int igb_set_features(struct net_device *netdev,
1876 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001877{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001878 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001879 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001880
Patrick McHardyf6469682013-04-19 02:04:27 +00001881 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001882 igb_vlan_mode(netdev, features);
1883
Ben Greear89eaefb2012-03-06 09:41:58 +00001884 if (!(changed & NETIF_F_RXALL))
1885 return 0;
1886
1887 netdev->features = features;
1888
1889 if (netif_running(netdev))
1890 igb_reinit_locked(adapter);
1891 else
1892 igb_reset(adapter);
1893
Michał Mirosławac52caa2011-06-08 08:38:01 +00001894 return 0;
1895}
1896
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001897static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001898 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001899 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001900 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001901 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001902 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001903 .ndo_set_mac_address = igb_set_mac,
1904 .ndo_change_mtu = igb_change_mtu,
1905 .ndo_do_ioctl = igb_ioctl,
1906 .ndo_tx_timeout = igb_tx_timeout,
1907 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001908 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1909 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001910 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1911 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1912 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
Lior Levy70ea4782013-03-03 20:27:48 +00001913 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001914 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001915#ifdef CONFIG_NET_POLL_CONTROLLER
1916 .ndo_poll_controller = igb_netpoll,
1917#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001918 .ndo_fix_features = igb_fix_features,
1919 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001920};
1921
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001922/**
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001923 * igb_set_fw_version - Configure version string for ethtool
1924 * @adapter: adapter struct
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001925 **/
1926void igb_set_fw_version(struct igb_adapter *adapter)
1927{
1928 struct e1000_hw *hw = &adapter->hw;
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001929 struct e1000_fw_version fw;
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001930
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001931 igb_get_fw_version(hw, &fw);
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001932
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001933 switch (hw->mac.type) {
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00001934 case e1000_i210:
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001935 case e1000_i211:
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00001936 if (!(igb_get_flash_presence_i210(hw))) {
1937 snprintf(adapter->fw_version,
1938 sizeof(adapter->fw_version),
1939 "%2d.%2d-%d",
1940 fw.invm_major, fw.invm_minor,
1941 fw.invm_img_type);
1942 break;
1943 }
1944 /* fall through */
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001945 default:
1946 /* if option is rom valid, display its version too */
1947 if (fw.or_valid) {
1948 snprintf(adapter->fw_version,
1949 sizeof(adapter->fw_version),
1950 "%d.%d, 0x%08x, %d.%d.%d",
1951 fw.eep_major, fw.eep_minor, fw.etrack_id,
1952 fw.or_major, fw.or_build, fw.or_patch);
1953 /* no option rom */
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00001954 } else if (fw.etrack_id != 0x0000) {
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001955 snprintf(adapter->fw_version,
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00001956 sizeof(adapter->fw_version),
1957 "%d.%d, 0x%08x",
1958 fw.eep_major, fw.eep_minor, fw.etrack_id);
1959 } else {
1960 snprintf(adapter->fw_version,
1961 sizeof(adapter->fw_version),
1962 "%d.%d.%d",
1963 fw.eep_major, fw.eep_minor, fw.eep_build);
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001964 }
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001965 break;
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001966 }
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001967 return;
1968}
1969
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001970/**
1971 * igb_init_i2c - Init I2C interface
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00001972 * @adapter: pointer to adapter structure
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001973 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00001974static s32 igb_init_i2c(struct igb_adapter *adapter)
1975{
1976 s32 status = E1000_SUCCESS;
1977
1978 /* I2C interface supported on i350 devices */
1979 if (adapter->hw.mac.type != e1000_i350)
1980 return E1000_SUCCESS;
1981
1982 /* Initialize the i2c bus which is controlled by the registers.
1983 * This bus will use the i2c_algo_bit structure that implements
1984 * the protocol through toggling of the 4 bits in the register.
1985 */
1986 adapter->i2c_adap.owner = THIS_MODULE;
1987 adapter->i2c_algo = igb_i2c_algo;
1988 adapter->i2c_algo.data = adapter;
1989 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
1990 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
1991 strlcpy(adapter->i2c_adap.name, "igb BB",
1992 sizeof(adapter->i2c_adap.name));
1993 status = i2c_bit_add_bus(&adapter->i2c_adap);
1994 return status;
1995}
1996
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001997/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001998 * igb_probe - Device Initialization Routine
1999 * @pdev: PCI device information struct
2000 * @ent: entry in igb_pci_tbl
Auke Kok9d5c8242008-01-24 02:22:38 -08002001 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002002 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08002003 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002004 * igb_probe initializes an adapter identified by a pci_dev structure.
2005 * The OS initialization, configuring of the adapter private structure,
2006 * and a hardware reset occur.
Auke Kok9d5c8242008-01-24 02:22:38 -08002007 **/
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002008static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Auke Kok9d5c8242008-01-24 02:22:38 -08002009{
2010 struct net_device *netdev;
2011 struct igb_adapter *adapter;
2012 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00002013 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002014 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00002015 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08002016 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
2017 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07002018 int err, pci_using_dac;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002019 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08002020
Andy Gospodarekbded64a2010-07-21 06:40:31 +00002021 /* Catch broken hardware that put the wrong VF device ID in
2022 * the PCIe SR-IOV capability.
2023 */
2024 if (pdev->is_virtfn) {
2025 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002026 pci_name(pdev), pdev->vendor, pdev->device);
Andy Gospodarekbded64a2010-07-21 06:40:31 +00002027 return -EINVAL;
2028 }
2029
Alexander Duyckaed5dec2009-02-06 23:16:04 +00002030 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002031 if (err)
2032 return err;
2033
2034 pci_using_dac = 0;
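	/* prefer 64-bit DMA, falling back to a 32-bit mask if it is not supported */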
Alexander Duyck59d71982010-04-27 13:09:25 +00002035 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08002036 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00002037 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08002038 if (!err)
2039 pci_using_dac = 1;
2040 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00002041 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08002042 if (err) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002043 err = dma_set_coherent_mask(&pdev->dev,
2044 DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08002045 if (err) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002046 dev_err(&pdev->dev,
2047 "No usable DMA configuration, aborting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002048 goto err_dma;
2049 }
2050 }
2051 }
2052
Alexander Duyckaed5dec2009-02-06 23:16:04 +00002053 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002054 IORESOURCE_MEM),
2055 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08002056 if (err)
2057 goto err_pci_reg;
2058
Frans Pop19d5afd2009-10-02 10:04:12 -07002059 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002060
Auke Kok9d5c8242008-01-24 02:22:38 -08002061 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07002062 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002063
2064 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08002065 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00002066 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08002067 if (!netdev)
2068 goto err_alloc_etherdev;
2069
2070 SET_NETDEV_DEV(netdev, &pdev->dev);
2071
2072 pci_set_drvdata(pdev, netdev);
2073 adapter = netdev_priv(netdev);
2074 adapter->netdev = netdev;
2075 adapter->pdev = pdev;
2076 hw = &adapter->hw;
2077 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00002078 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002079
2080 mmio_start = pci_resource_start(pdev, 0);
2081 mmio_len = pci_resource_len(pdev, 0);
2082
2083 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00002084 hw->hw_addr = ioremap(mmio_start, mmio_len);
2085 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08002086 goto err_ioremap;
2087
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002088 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08002089 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002090 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08002091
2092 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2093
2094 netdev->mem_start = mmio_start;
2095 netdev->mem_end = mmio_start + mmio_len;
2096
Auke Kok9d5c8242008-01-24 02:22:38 -08002097 /* PCI config space info */
2098 hw->vendor_id = pdev->vendor;
2099 hw->device_id = pdev->device;
2100 hw->revision_id = pdev->revision;
2101 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2102 hw->subsystem_device_id = pdev->subsystem_device;
2103
Auke Kok9d5c8242008-01-24 02:22:38 -08002104 /* Copy the default MAC, PHY and NVM function pointers */
2105 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
2106 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
2107 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
2108 /* Initialize skew-specific constants */
2109 err = ei->get_invariants(hw);
2110 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00002111 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08002112
Alexander Duyck450c87c2009-02-06 23:22:11 +00002113 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08002114 err = igb_sw_init(adapter);
2115 if (err)
2116 goto err_sw_init;
2117
2118 igb_get_bus_info_pcie(hw);
2119
2120 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002121
2122 /* Copper options */
2123 if (hw->phy.media_type == e1000_media_type_copper) {
2124 hw->phy.mdix = AUTO_ALL_MODES;
2125 hw->phy.disable_polarity_correction = false;
2126 hw->phy.ms_type = e1000_ms_hw_default;
2127 }
2128
2129 if (igb_check_reset_block(hw))
2130 dev_info(&pdev->dev,
2131 "PHY reset is blocked due to SOL/IDER session.\n");
2132
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002133 /* features is initialized to 0 in allocation; it might have bits
Alexander Duyck077887c2011-08-26 07:46:29 +00002134 * set by igb_sw_init, so we should use an OR instead of an
2135 * assignment.
2136 */
2137 netdev->features |= NETIF_F_SG |
2138 NETIF_F_IP_CSUM |
2139 NETIF_F_IPV6_CSUM |
2140 NETIF_F_TSO |
2141 NETIF_F_TSO6 |
2142 NETIF_F_RXHASH |
2143 NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00002144 NETIF_F_HW_VLAN_CTAG_RX |
2145 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002146
Alexander Duyck077887c2011-08-26 07:46:29 +00002147 /* copy netdev features into list of user selectable features */
2148 netdev->hw_features |= netdev->features;
Ben Greear89eaefb2012-03-06 09:41:58 +00002149 netdev->hw_features |= NETIF_F_RXALL;
Auke Kok9d5c8242008-01-24 02:22:38 -08002150
Alexander Duyck077887c2011-08-26 07:46:29 +00002151 /* set this bit last since it cannot be part of hw_features */
Patrick McHardyf6469682013-04-19 02:04:27 +00002152 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
Alexander Duyck077887c2011-08-26 07:46:29 +00002153
2154 netdev->vlan_features |= NETIF_F_TSO |
2155 NETIF_F_TSO6 |
2156 NETIF_F_IP_CSUM |
2157 NETIF_F_IPV6_CSUM |
2158 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002159
Ben Greear6b8f0922012-03-06 09:41:53 +00002160 netdev->priv_flags |= IFF_SUPP_NOFCS;
2161
Yi Zou7b872a52010-09-22 17:57:58 +00002162 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002163 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00002164 netdev->vlan_features |= NETIF_F_HIGHDMA;
2165 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002166
Michał Mirosławac52caa2011-06-08 08:38:01 +00002167 if (hw->mac.type >= e1000_82576) {
2168 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002169 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002170 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002171
Jiri Pirko01789342011-08-16 06:29:00 +00002172 netdev->priv_flags |= IFF_UNICAST_FLT;
2173
Alexander Duyck330a6d62009-10-27 23:51:35 +00002174 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002175
2176 /* before reading the NVM, reset the controller to put the device in a
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002177 * known good starting state
2178 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002179 hw->mac.ops.reset_hw(hw);
2180
Carolyn Wybornyef3a0092013-07-17 19:02:53 +00002181 /* make sure the NVM is good, i211/i210 parts can have special NVM
2182 * that doesn't contain a checksum
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002183 */
Carolyn Wybornyef3a0092013-07-17 19:02:53 +00002184 switch (hw->mac.type) {
2185 case e1000_i210:
2186 case e1000_i211:
2187 if (igb_get_flash_presence_i210(hw)) {
2188 if (hw->nvm.ops.validate(hw) < 0) {
2189 dev_err(&pdev->dev,
2190 "The NVM Checksum Is Not Valid\n");
2191 err = -EIO;
2192 goto err_eeprom;
2193 }
2194 }
2195 break;
2196 default:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002197 if (hw->nvm.ops.validate(hw) < 0) {
2198 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2199 err = -EIO;
2200 goto err_eeprom;
2201 }
Carolyn Wybornyef3a0092013-07-17 19:02:53 +00002202 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002203 }
2204
2205 /* copy the MAC address out of the NVM */
2206 if (hw->mac.ops.read_mac_addr(hw))
2207 dev_err(&pdev->dev, "NVM Read Error\n");
2208
2209 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002210
Jiri Pirkoaaeb6cd2013-01-08 01:38:26 +00002211 if (!is_valid_ether_addr(netdev->dev_addr)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002212 dev_err(&pdev->dev, "Invalid MAC Address\n");
2213 err = -EIO;
2214 goto err_eeprom;
2215 }
2216
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002217 /* get firmware version for ethtool -i */
2218 igb_set_fw_version(adapter);
2219
Joe Perchesc061b182010-08-23 18:20:03 +00002220 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002221 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002222 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002223 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002224
2225 INIT_WORK(&adapter->reset_task, igb_reset_task);
2226 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2227
Alexander Duyck450c87c2009-02-06 23:22:11 +00002228 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002229 adapter->fc_autoneg = true;
2230 hw->mac.autoneg = true;
2231 hw->phy.autoneg_advertised = 0x2f;
2232
Alexander Duyck0cce1192009-07-23 18:10:24 +00002233 hw->fc.requested_mode = e1000_fc_default;
2234 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002235
Auke Kok9d5c8242008-01-24 02:22:38 -08002236 igb_validate_mdi_setting(hw);
2237
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002238 /* By default, support wake on port A */
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002239 if (hw->bus.func == 0)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002240 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2241
2242 /* Check the NVM for wake support on non-port A ports */
2243 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002244 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002245 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2246 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002247 else if (hw->bus.func == 1)
2248 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002249
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002250 if (eeprom_data & IGB_EEPROM_APME)
2251 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002252
2253 /* now that we have the eeprom settings, apply the special cases where
2254 * the eeprom may be wrong or the board simply won't support wake on
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002255 * lan on a particular port
2256 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002257 switch (pdev->device) {
2258 case E1000_DEV_ID_82575GB_QUAD_COPPER:
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002259 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002260 break;
2261 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002262 case E1000_DEV_ID_82576_FIBER:
2263 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002264 /* Wake events only supported on port A for dual fiber
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002265 * regardless of eeprom setting
2266 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002267 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002268 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002269 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002270 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002271 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002272 /* if quad port adapter, disable WoL on all but port A */
2273 if (global_quad_port_a != 0)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002274 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002275 else
2276 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2277 /* Reset for multiple quad port adapters */
2278 if (++global_quad_port_a == 4)
2279 global_quad_port_a = 0;
2280 break;
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002281 default:
2282 /* If the device can't wake, don't set software support */
2283 if (!device_can_wakeup(&adapter->pdev->dev))
2284 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002285 }
2286
2287 /* initialize the wol settings based on the eeprom settings */
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002288 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2289 adapter->wol |= E1000_WUFC_MAG;
2290
2291 /* Some vendors want WoL disabled by default, but still supported */
2292 if ((hw->mac.type == e1000_i350) &&
2293 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2294 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2295 adapter->wol = 0;
2296 }
2297
2298 device_set_wakeup_enable(&adapter->pdev->dev,
2299 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
Auke Kok9d5c8242008-01-24 02:22:38 -08002300
2301 /* reset the hardware with the new settings */
2302 igb_reset(adapter);
2303
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002304 /* Init the I2C interface */
2305 err = igb_init_i2c(adapter);
2306 if (err) {
2307 dev_err(&pdev->dev, "failed to init i2c interface\n");
2308 goto err_eeprom;
2309 }
2310
Auke Kok9d5c8242008-01-24 02:22:38 -08002311 /* let the f/w know that the h/w is now under the control of the
2312 * driver. */
2313 igb_get_hw_control(adapter);
2314
Auke Kok9d5c8242008-01-24 02:22:38 -08002315 strcpy(netdev->name, "eth%d");
2316 err = register_netdev(netdev);
2317 if (err)
2318 goto err_register;
2319
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002320 /* carrier off reporting is important to ethtool even BEFORE open */
2321 netif_carrier_off(netdev);
2322
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002323#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002324 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002325 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002326 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002327 igb_setup_dca(adapter);
2328 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002329
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002330#endif
Carolyn Wybornye4288932012-12-07 03:01:42 +00002331#ifdef CONFIG_IGB_HWMON
2332 /* Initialize the thermal sensor on i350 devices. */
2333 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2334 u16 ets_word;
Matthew Vick3c89f6d2012-08-10 05:40:43 +00002335
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002336 /* Read the NVM to determine if this i350 device supports an
Carolyn Wybornye4288932012-12-07 03:01:42 +00002337 * external thermal sensor.
2338 */
2339 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
2340 if (ets_word != 0x0000 && ets_word != 0xFFFF)
2341 adapter->ets = true;
2342 else
2343 adapter->ets = false;
2344 if (igb_sysfs_init(adapter))
2345 dev_err(&pdev->dev,
2346 "failed to allocate sysfs resources\n");
2347 } else {
2348 adapter->ets = false;
2349 }
2350#endif
Anders Berggren673b8b72011-02-04 07:32:32 +00002351 /* do hw tstamp init after resetting */
Richard Cochran7ebae812012-03-16 10:55:37 +00002352 igb_ptp_init(adapter);
Anders Berggren673b8b72011-02-04 07:32:32 +00002353
Auke Kok9d5c8242008-01-24 02:22:38 -08002354 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00002355 /* print bus type/speed/width info, not applicable to i354 */
2356 if (hw->mac.type != e1000_i354) {
2357 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
2358 netdev->name,
2359 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2360 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2361 "unknown"),
2362 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
2363 "Width x4" :
2364 (hw->bus.width == e1000_bus_width_pcie_x2) ?
2365 "Width x2" :
2366 (hw->bus.width == e1000_bus_width_pcie_x1) ?
2367 "Width x1" : "unknown"), netdev->dev_addr);
2368 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002369
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002370 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2371 if (ret_val)
2372 strcpy(part_str, "Unknown");
2373 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002374 dev_info(&pdev->dev,
2375 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2376 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002377 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002378 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002379 switch (hw->mac.type) {
2380 case e1000_i350:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002381 case e1000_i210:
2382 case e1000_i211:
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002383 igb_set_eee_i350(hw);
2384 break;
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00002385 case e1000_i354:
2386 if (hw->phy.media_type == e1000_media_type_copper) {
2387 if ((rd32(E1000_CTRL_EXT) &
2388 E1000_CTRL_EXT_LINK_MODE_SGMII))
2389 igb_set_eee_i354(hw);
2390 }
2391 break;
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002392 default:
2393 break;
2394 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002395
2396 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002397 return 0;
2398
2399err_register:
2400 igb_release_hw_control(adapter);
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002401 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
Auke Kok9d5c8242008-01-24 02:22:38 -08002402err_eeprom:
2403 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002404 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002405
2406 if (hw->flash_address)
2407 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002408err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002409 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002410 iounmap(hw->hw_addr);
2411err_ioremap:
2412 free_netdev(netdev);
2413err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002414 pci_release_selected_regions(pdev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002415 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002416err_pci_reg:
2417err_dma:
2418 pci_disable_device(pdev);
2419 return err;
2420}
2421
Greg Rosefa44f2f2013-01-17 01:03:06 -08002422#ifdef CONFIG_PCI_IOV
2423static int igb_disable_sriov(struct pci_dev *pdev)
2424{
2425 struct net_device *netdev = pci_get_drvdata(pdev);
2426 struct igb_adapter *adapter = netdev_priv(netdev);
2427 struct e1000_hw *hw = &adapter->hw;
2428
2429 /* reclaim resources allocated to VFs */
2430 if (adapter->vf_data) {
2431 /* disable iov and allow time for transactions to clear */
Alexander Duyckb09186d2013-03-26 00:03:26 +00002432 if (pci_vfs_assigned(pdev)) {
Greg Rosefa44f2f2013-01-17 01:03:06 -08002433 dev_warn(&pdev->dev,
2434 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
2435 return -EPERM;
2436 } else {
2437 pci_disable_sriov(pdev);
2438 msleep(500);
2439 }
2440
2441 kfree(adapter->vf_data);
2442 adapter->vf_data = NULL;
2443 adapter->vfs_allocated_count = 0;
2444 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2445 wrfl();
2446 msleep(100);
2447 dev_info(&pdev->dev, "IOV Disabled\n");
2448
2449 /* Re-enable DMA Coalescing flag since IOV is turned off */
2450 adapter->flags |= IGB_FLAG_DMAC;
2451 }
2452
2453 return 0;
2454}
2455
2456static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
2457{
2458 struct net_device *netdev = pci_get_drvdata(pdev);
2459 struct igb_adapter *adapter = netdev_priv(netdev);
2460 int old_vfs = pci_num_vf(pdev);
2461 int err = 0;
2462 int i;
2463
Mitch A Williams50267192013-06-20 06:03:36 +00002464 if (!adapter->msix_entries) {
2465 err = -EPERM;
2466 goto out;
2467 }
2468
Greg Rosefa44f2f2013-01-17 01:03:06 -08002469 if (!num_vfs)
2470 goto out;
2471 else if (old_vfs && old_vfs == num_vfs)
2472 goto out;
2473 else if (old_vfs && old_vfs != num_vfs)
2474 err = igb_disable_sriov(pdev);
2475
2476 if (err)
2477 goto out;
2478
2479 if (num_vfs > 7) {
2480 err = -EPERM;
2481 goto out;
2482 }
2483
2484 adapter->vfs_allocated_count = num_vfs;
2485
2486 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2487 sizeof(struct vf_data_storage), GFP_KERNEL);
2488
2489 /* if allocation failed then we do not support SR-IOV */
2490 if (!adapter->vf_data) {
2491 adapter->vfs_allocated_count = 0;
2492 dev_err(&pdev->dev,
2493 "Unable to allocate memory for VF Data Storage\n");
2494 err = -ENOMEM;
2495 goto out;
2496 }
2497
2498 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
2499 if (err)
2500 goto err_out;
2501
2502 dev_info(&pdev->dev, "%d VFs allocated\n",
2503 adapter->vfs_allocated_count);
2504 for (i = 0; i < adapter->vfs_allocated_count; i++)
2505 igb_vf_configure(adapter, i);
2506
2507 /* DMA Coalescing is not supported in IOV mode. */
2508 adapter->flags &= ~IGB_FLAG_DMAC;
2509 goto out;
2510
2511err_out:
2512 kfree(adapter->vf_data);
2513 adapter->vf_data = NULL;
2514 adapter->vfs_allocated_count = 0;
2515out:
2516 return err;
2517}
2518
2519#endif
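/* Runtime SR-IOV reconfiguration is normally driven from user space via
 * the PCI sysfs interface rather than the deprecated max_vfs module
 * parameter (see igb_sw_init below).  Illustrative usage only, with a
 * made-up device address:
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * which, assuming the driver wires a sriov_configure callback to these
 * helpers, ends up in igb_enable_sriov() and igb_disable_sriov()
 * respectively.
 */
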
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002520/**
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002521 * igb_remove_i2c - Cleanup I2C interface
2522 * @adapter: pointer to adapter structure
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002523 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002524static void igb_remove_i2c(struct igb_adapter *adapter)
2525{
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002526 /* free the adapter bus structure */
2527 i2c_del_adapter(&adapter->i2c_adap);
2528}
2529
Auke Kok9d5c8242008-01-24 02:22:38 -08002530/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002531 * igb_remove - Device Removal Routine
2532 * @pdev: PCI device information struct
Auke Kok9d5c8242008-01-24 02:22:38 -08002533 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002534 * igb_remove is called by the PCI subsystem to alert the driver
2535 * that it should release a PCI device. This could be caused by a
2536 * Hot-Plug event, or because the driver is going to be removed from
2537 * memory.
Auke Kok9d5c8242008-01-24 02:22:38 -08002538 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05002539static void igb_remove(struct pci_dev *pdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002540{
2541 struct net_device *netdev = pci_get_drvdata(pdev);
2542 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002543 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002544
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002545 pm_runtime_get_noresume(&pdev->dev);
Carolyn Wybornye4288932012-12-07 03:01:42 +00002546#ifdef CONFIG_IGB_HWMON
2547 igb_sysfs_exit(adapter);
2548#endif
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002549 igb_remove_i2c(adapter);
Matthew Vicka79f4f82012-08-10 05:40:44 +00002550 igb_ptp_stop(adapter);
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002551 /* The watchdog timer may be rescheduled, so explicitly
Tejun Heo760141a2010-12-12 16:45:14 +01002552 * disable it from being rescheduled.
2553 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002554 set_bit(__IGB_DOWN, &adapter->state);
2555 del_timer_sync(&adapter->watchdog_timer);
2556 del_timer_sync(&adapter->phy_info_timer);
2557
Tejun Heo760141a2010-12-12 16:45:14 +01002558 cancel_work_sync(&adapter->reset_task);
2559 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002560
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002561#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002562 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002563 dev_info(&pdev->dev, "DCA disabled\n");
2564 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002565 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002566 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002567 }
2568#endif
2569
Auke Kok9d5c8242008-01-24 02:22:38 -08002570 /* Release control of h/w to f/w. If f/w is AMT enabled, this
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002571 * would have already happened in close and is redundant.
2572 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002573 igb_release_hw_control(adapter);
2574
2575 unregister_netdev(netdev);
2576
Alexander Duyck047e0032009-10-27 15:49:27 +00002577 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002578
Alexander Duyck37680112009-02-19 20:40:30 -08002579#ifdef CONFIG_PCI_IOV
Greg Rosefa44f2f2013-01-17 01:03:06 -08002580 igb_disable_sriov(pdev);
Alexander Duyck37680112009-02-19 20:40:30 -08002581#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002582
Alexander Duyck28b07592009-02-06 23:20:31 +00002583 iounmap(hw->hw_addr);
2584 if (hw->flash_address)
2585 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002586 pci_release_selected_regions(pdev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002587 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002588
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002589 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002590 free_netdev(netdev);
2591
Frans Pop19d5afd2009-10-02 10:04:12 -07002592 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002593
Auke Kok9d5c8242008-01-24 02:22:38 -08002594 pci_disable_device(pdev);
2595}
2596
2597/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002598 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2599 * @adapter: board private structure to initialize
Alexander Duycka6b623e2009-10-27 23:47:53 +00002600 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002601 * This function initializes the vf specific data storage and then attempts to
2602 * allocate the VFs. The reason for ordering it this way is because it is much
2603 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2604 * the memory for the VFs.
Alexander Duycka6b623e2009-10-27 23:47:53 +00002605 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05002606static void igb_probe_vfs(struct igb_adapter *adapter)
Alexander Duycka6b623e2009-10-27 23:47:53 +00002607{
2608#ifdef CONFIG_PCI_IOV
2609 struct pci_dev *pdev = adapter->pdev;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002610 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002611
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002612 /* Virtualization features not supported on i210 family. */
2613 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2614 return;
2615
Greg Rosefa44f2f2013-01-17 01:03:06 -08002616 pci_sriov_set_totalvfs(pdev, 7);
Alex Williamsond5e51a12013-03-13 15:50:29 +00002617 igb_enable_sriov(pdev, max_vfs);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002618
Alexander Duycka6b623e2009-10-27 23:47:53 +00002619#endif /* CONFIG_PCI_IOV */
2620}
2621
Greg Rosefa44f2f2013-01-17 01:03:06 -08002622static void igb_init_queue_configuration(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002623{
2624 struct e1000_hw *hw = &adapter->hw;
Matthew Vick374a5422012-05-18 04:54:58 +00002625 u32 max_rss_queues;
Auke Kok9d5c8242008-01-24 02:22:38 -08002626
Matthew Vick374a5422012-05-18 04:54:58 +00002627 /* Determine the maximum number of RSS queues supported. */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002628 switch (hw->mac.type) {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002629 case e1000_i211:
Matthew Vick374a5422012-05-18 04:54:58 +00002630 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002631 break;
Matthew Vick374a5422012-05-18 04:54:58 +00002632 case e1000_82575:
2633 case e1000_i210:
2634 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2635 break;
2636 case e1000_i350:
2637 /* I350 cannot do RSS and SR-IOV at the same time */
2638 if (!!adapter->vfs_allocated_count) {
2639 max_rss_queues = 1;
2640 break;
2641 }
2642 /* fall through */
2643 case e1000_82576:
2644 if (!!adapter->vfs_allocated_count) {
2645 max_rss_queues = 2;
2646 break;
2647 }
2648 /* fall through */
2649 case e1000_82580:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00002650 case e1000_i354:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002651 default:
Matthew Vick374a5422012-05-18 04:54:58 +00002652 max_rss_queues = IGB_MAX_RX_QUEUES;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002653 break;
2654 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002655
Matthew Vick374a5422012-05-18 04:54:58 +00002656 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2657
2658 /* Determine if we need to pair queues. */
2659 switch (hw->mac.type) {
2660 case e1000_82575:
2661 case e1000_i211:
2662 /* Device supports enough interrupts without queue pairing. */
2663 break;
2664 case e1000_82576:
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002665 /* If VFs are going to be allocated with RSS queues then we
Matthew Vick374a5422012-05-18 04:54:58 +00002666 * should pair the queues in order to conserve interrupts due
2667 * to limited supply.
2668 */
2669 if ((adapter->rss_queues > 1) &&
2670 (adapter->vfs_allocated_count > 6))
2671 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2672 /* fall through */
2673 case e1000_82580:
2674 case e1000_i350:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00002675 case e1000_i354:
Matthew Vick374a5422012-05-18 04:54:58 +00002676 case e1000_i210:
2677 default:
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002678 /* If rss_queues > half of max_rss_queues, pair the queues in
Matthew Vick374a5422012-05-18 04:54:58 +00002679 * order to conserve interrupts due to limited supply.
2680 */
2681 if (adapter->rss_queues > (max_rss_queues / 2))
2682 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2683 break;
2684 }
Greg Rosefa44f2f2013-01-17 01:03:06 -08002685}
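
/* Worked example (assuming IGB_MAX_RX_QUEUES is 8): an i350 with no VFs
 * and eight online CPUs gets max_rss_queues = 8 and rss_queues =
 * min(8, 8) = 8; since that is more than half of max_rss_queues,
 * IGB_FLAG_QUEUE_PAIRS is set and each interrupt vector services a
 * Tx/Rx ring pair.
 */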
2686
2687/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002688 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2689 * @adapter: board private structure to initialize
Greg Rosefa44f2f2013-01-17 01:03:06 -08002690 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002691 * igb_sw_init initializes the Adapter private data structure.
2692 * Fields are initialized based on PCI device information and
2693 * OS network device settings (MTU size).
Greg Rosefa44f2f2013-01-17 01:03:06 -08002694 **/
2695static int igb_sw_init(struct igb_adapter *adapter)
2696{
2697 struct e1000_hw *hw = &adapter->hw;
2698 struct net_device *netdev = adapter->netdev;
2699 struct pci_dev *pdev = adapter->pdev;
2700
2701 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2702
2703 /* set default ring sizes */
2704 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2705 adapter->rx_ring_count = IGB_DEFAULT_RXD;
2706
2707 /* set default ITR values */
2708 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2709 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2710
2711 /* set default work limits */
2712 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2713
2714 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2715 VLAN_HLEN;
2716 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
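	/* e.g. with the default MTU of 1500 this gives a max_frame_size of
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
	 * bytes and a min_frame_size of 60 (ETH_ZLEN) + 4 = 64 bytes.
	 */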
2717
2718 spin_lock_init(&adapter->stats64_lock);
2719#ifdef CONFIG_PCI_IOV
2720 switch (hw->mac.type) {
2721 case e1000_82576:
2722 case e1000_i350:
2723 if (max_vfs > 7) {
2724 dev_warn(&pdev->dev,
2725 "Maximum of 7 VFs per PF, using max\n");
Alex Williamsond0f63ac2013-03-13 15:50:24 +00002726 max_vfs = adapter->vfs_allocated_count = 7;
Greg Rosefa44f2f2013-01-17 01:03:06 -08002727 } else
2728 adapter->vfs_allocated_count = max_vfs;
2729 if (adapter->vfs_allocated_count)
2730 dev_warn(&pdev->dev,
2731 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
2732 break;
2733 default:
2734 break;
2735 }
2736#endif /* CONFIG_PCI_IOV */
2737
2738 igb_init_queue_configuration(adapter);
Alexander Duycka99955f2009-11-12 18:37:19 +00002739
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002740 /* Set up and initialize a copy of the hw vlan table array */
Joe Perchesb2adaca2013-02-03 17:43:58 +00002741 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
2742 GFP_ATOMIC);
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002743
Alexander Duycka6b623e2009-10-27 23:47:53 +00002744 /* This call may decrease the number of queues */
Stefan Assmann53c7d062012-12-04 06:00:12 +00002745 if (igb_init_interrupt_scheme(adapter, true)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002746 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2747 return -ENOMEM;
2748 }
2749
Alexander Duycka6b623e2009-10-27 23:47:53 +00002750 igb_probe_vfs(adapter);
2751
Auke Kok9d5c8242008-01-24 02:22:38 -08002752 /* Explicitly disable IRQ since the NIC can be in any state. */
2753 igb_irq_disable(adapter);
2754
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002755 if (hw->mac.type >= e1000_i350)
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002756 adapter->flags &= ~IGB_FLAG_DMAC;
2757
Auke Kok9d5c8242008-01-24 02:22:38 -08002758 set_bit(__IGB_DOWN, &adapter->state);
2759 return 0;
2760}
2761
2762/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002763 * igb_open - Called when a network interface is made active
2764 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002765 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002766 * Returns 0 on success, negative value on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08002767 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002768 * The open entry point is called when a network interface is made
2769 * active by the system (IFF_UP). At this point all resources needed
2770 * for transmit and receive operations are allocated, the interrupt
2771 * handler is registered with the OS, the watchdog timer is started,
2772 * and the stack is notified that the interface is ready.
Auke Kok9d5c8242008-01-24 02:22:38 -08002773 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002774static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08002775{
2776 struct igb_adapter *adapter = netdev_priv(netdev);
2777 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002778 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002779 int err;
2780 int i;
2781
2782 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002783 if (test_bit(__IGB_TESTING, &adapter->state)) {
2784 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08002785 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002786 }
2787
2788 if (!resuming)
2789 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002790
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002791 netif_carrier_off(netdev);
2792
Auke Kok9d5c8242008-01-24 02:22:38 -08002793 /* allocate transmit descriptors */
2794 err = igb_setup_all_tx_resources(adapter);
2795 if (err)
2796 goto err_setup_tx;
2797
2798 /* allocate receive descriptors */
2799 err = igb_setup_all_rx_resources(adapter);
2800 if (err)
2801 goto err_setup_rx;
2802
Nick Nunley88a268c2010-02-17 01:01:59 +00002803 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002804
Auke Kok9d5c8242008-01-24 02:22:38 -08002805 /* before we allocate an interrupt, we must be ready to handle it.
2806 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2807 * as soon as we call pci_request_irq, so we have to set up our
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002808 * clean_rx handler before we do so.
2809 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002810 igb_configure(adapter);
2811
2812 err = igb_request_irq(adapter);
2813 if (err)
2814 goto err_req_irq;
2815
Alexander Duyck0c2cc022012-09-25 00:31:22 +00002816 /* Notify the stack of the actual queue counts. */
2817 err = netif_set_real_num_tx_queues(adapter->netdev,
2818 adapter->num_tx_queues);
2819 if (err)
2820 goto err_set_queues;
2821
2822 err = netif_set_real_num_rx_queues(adapter->netdev,
2823 adapter->num_rx_queues);
2824 if (err)
2825 goto err_set_queues;
2826
Auke Kok9d5c8242008-01-24 02:22:38 -08002827 /* From here on the code is the same as igb_up() */
2828 clear_bit(__IGB_DOWN, &adapter->state);
2829
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002830 for (i = 0; i < adapter->num_q_vectors; i++)
2831 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002832
2833 /* Clear any pending interrupts. */
2834 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002835
2836 igb_irq_enable(adapter);
2837
Alexander Duyckd4960302009-10-27 15:53:45 +00002838 /* notify VFs that reset has been completed */
2839 if (adapter->vfs_allocated_count) {
2840 u32 reg_data = rd32(E1000_CTRL_EXT);
2841 reg_data |= E1000_CTRL_EXT_PFRSTD;
2842 wr32(E1000_CTRL_EXT, reg_data);
2843 }
2844
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002845 netif_tx_start_all_queues(netdev);
2846
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002847 if (!resuming)
2848 pm_runtime_put(&pdev->dev);
2849
Alexander Duyck25568a52009-10-27 23:49:59 +00002850 /* start the watchdog. */
2851 hw->mac.get_link_status = 1;
2852 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002853
2854 return 0;
2855
Alexander Duyck0c2cc022012-09-25 00:31:22 +00002856err_set_queues:
2857 igb_free_irq(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002858err_req_irq:
2859 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002860 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002861 igb_free_all_rx_resources(adapter);
2862err_setup_rx:
2863 igb_free_all_tx_resources(adapter);
2864err_setup_tx:
2865 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002866 if (!resuming)
2867 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002868
2869 return err;
2870}
2871
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002872static int igb_open(struct net_device *netdev)
2873{
2874 return __igb_open(netdev, false);
2875}
2876
Auke Kok9d5c8242008-01-24 02:22:38 -08002877/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002878 * igb_close - Disables a network interface
2879 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002880 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002881 * Returns 0, this is not allowed to fail
Auke Kok9d5c8242008-01-24 02:22:38 -08002882 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002883 * The close entry point is called when an interface is de-activated
2884 * by the OS. The hardware is still under the driver's control, but
2885 * needs to be disabled. A global MAC reset is issued to stop the
2886 * hardware, and all transmit and receive resources are freed.
Auke Kok9d5c8242008-01-24 02:22:38 -08002887 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002888static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08002889{
2890 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002891 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002892
2893 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08002894
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002895 if (!suspending)
2896 pm_runtime_get_sync(&pdev->dev);
2897
2898 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002899 igb_free_irq(adapter);
2900
2901 igb_free_all_tx_resources(adapter);
2902 igb_free_all_rx_resources(adapter);
2903
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002904 if (!suspending)
2905 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002906 return 0;
2907}
2908
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002909static int igb_close(struct net_device *netdev)
2910{
2911 return __igb_close(netdev, false);
2912}
2913
Auke Kok9d5c8242008-01-24 02:22:38 -08002914/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002915 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2916 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9d5c8242008-01-24 02:22:38 -08002917 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002918 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08002919 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002920int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002921{
Alexander Duyck59d71982010-04-27 13:09:25 +00002922 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002923 int size;
2924
Alexander Duyck06034642011-08-26 07:44:22 +00002925 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002926
2927 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002928 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002929 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002930
2931 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002932 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002933 tx_ring->size = ALIGN(tx_ring->size, 4096);
2934
Alexander Duyck5536d212012-09-25 00:31:17 +00002935 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2936 &tx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002937 if (!tx_ring->desc)
2938 goto err;
2939
Auke Kok9d5c8242008-01-24 02:22:38 -08002940 tx_ring->next_to_use = 0;
2941 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002942
Auke Kok9d5c8242008-01-24 02:22:38 -08002943 return 0;
2944
2945err:
Alexander Duyck06034642011-08-26 07:44:22 +00002946 vfree(tx_ring->tx_buffer_info);
Alexander Duyckf33005a2012-09-13 06:27:55 +00002947 tx_ring->tx_buffer_info = NULL;
2948 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002949 return -ENOMEM;
2950}
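
/* Sizing sketch: a union e1000_adv_tx_desc is 16 bytes, so the default
 * 256-descriptor ring needs exactly 4096 bytes and the ALIGN() above is
 * a no-op, while a count of, say, 200 (3200 bytes) would be rounded up
 * to a full 4 KB of coherent DMA memory.
 */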
2951
2952/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002953 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2954 * (Descriptors) for all queues
2955 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002956 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002957 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08002958 **/
2959static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2960{
Alexander Duyck439705e2009-10-27 23:49:20 +00002961 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002962 int i, err = 0;
2963
2964 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002965 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002966 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002967 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002968 "Allocation for Tx Queue %u failed\n", i);
2969 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002970 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002971 break;
2972 }
2973 }
2974
2975 return err;
2976}
2977
2978/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002979 * igb_setup_tctl - configure the transmit control registers
2980 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002981 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002982void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002983{
Auke Kok9d5c8242008-01-24 02:22:38 -08002984 struct e1000_hw *hw = &adapter->hw;
2985 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002986
Alexander Duyck85b430b2009-10-27 15:50:29 +00002987 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2988 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002989
2990 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002991 tctl = rd32(E1000_TCTL);
2992 tctl &= ~E1000_TCTL_CT;
2993 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2994 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2995
2996 igb_config_collision_dist(hw);
2997
Auke Kok9d5c8242008-01-24 02:22:38 -08002998 /* Enable transmits */
2999 tctl |= E1000_TCTL_EN;
3000
3001 wr32(E1000_TCTL, tctl);
3002}
3003
3004/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003005 * igb_configure_tx_ring - Configure transmit ring after Reset
3006 * @adapter: board private structure
3007 * @ring: tx ring to configure
Alexander Duyck85b430b2009-10-27 15:50:29 +00003008 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003009 * Configure a transmit ring after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00003010 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003011void igb_configure_tx_ring(struct igb_adapter *adapter,
3012 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003013{
3014 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00003015 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003016 u64 tdba = ring->dma;
3017 int reg_idx = ring->reg_idx;
3018
3019 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003020 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003021 wrfl();
3022 mdelay(10);
3023
3024 wr32(E1000_TDLEN(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003025 ring->count * sizeof(union e1000_adv_tx_desc));
Alexander Duyck85b430b2009-10-27 15:50:29 +00003026 wr32(E1000_TDBAL(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003027 tdba & 0x00000000ffffffffULL);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003028 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
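	/* e.g. a descriptor base of 0x0000000123456000 programs TDBAL with
	 * 0x23456000 and TDBAH with 0x00000001 (illustrative address).
	 */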
3029
Alexander Duyckfce99e32009-10-27 15:51:27 +00003030 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003031 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003032 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003033
3034 txdctl |= IGB_TX_PTHRESH;
3035 txdctl |= IGB_TX_HTHRESH << 8;
3036 txdctl |= IGB_TX_WTHRESH << 16;
3037
3038 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3039 wr32(E1000_TXDCTL(reg_idx), txdctl);
3040}
3041
3042/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003043 * igb_configure_tx - Configure transmit Unit after Reset
3044 * @adapter: board private structure
Alexander Duyck85b430b2009-10-27 15:50:29 +00003045 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003046 * Configure the Tx unit of the MAC after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00003047 **/
3048static void igb_configure_tx(struct igb_adapter *adapter)
3049{
3050 int i;
3051
3052 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003053 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003054}
3055
3056/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003057 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
3058 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
Auke Kok9d5c8242008-01-24 02:22:38 -08003059 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003060 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003061 **/
Alexander Duyck80785292009-10-27 15:51:47 +00003062int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003063{
Alexander Duyck59d71982010-04-27 13:09:25 +00003064 struct device *dev = rx_ring->dev;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003065 int size;
Auke Kok9d5c8242008-01-24 02:22:38 -08003066
Alexander Duyck06034642011-08-26 07:44:22 +00003067 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003068
3069 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00003070 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003071 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08003072
Auke Kok9d5c8242008-01-24 02:22:38 -08003073 /* Round up to nearest 4K */
Alexander Duyckf33005a2012-09-13 06:27:55 +00003074 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08003075 rx_ring->size = ALIGN(rx_ring->size, 4096);
3076
Alexander Duyck5536d212012-09-25 00:31:17 +00003077 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3078 &rx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08003079 if (!rx_ring->desc)
3080 goto err;
3081
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003082 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003083 rx_ring->next_to_clean = 0;
3084 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003085
Auke Kok9d5c8242008-01-24 02:22:38 -08003086 return 0;
3087
3088err:
Alexander Duyck06034642011-08-26 07:44:22 +00003089 vfree(rx_ring->rx_buffer_info);
3090 rx_ring->rx_buffer_info = NULL;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003091 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003092 return -ENOMEM;
3093}
3094
3095/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003096 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3097 * (Descriptors) for all queues
3098 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003099 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003100 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003101 **/
3102static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3103{
Alexander Duyck439705e2009-10-27 23:49:20 +00003104 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003105 int i, err = 0;
3106
3107 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003108 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003109 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00003110 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08003111 "Allocation for Rx Queue %u failed\n", i);
3112 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00003113 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003114 break;
3115 }
3116 }
3117
3118 return err;
3119}
3120
3121/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003122 * igb_setup_mrqc - configure the multiple receive queue control registers
3123 * @adapter: Board private structure
Alexander Duyck06cf2662009-10-27 15:53:25 +00003124 **/
3125static void igb_setup_mrqc(struct igb_adapter *adapter)
3126{
3127 struct e1000_hw *hw = &adapter->hw;
3128 u32 mrqc, rxcsum;
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003129 u32 j, num_rx_queues, shift = 0;
Alexander Duycka57fe232012-09-13 06:28:16 +00003130 static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
3131 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
3132 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
3133 0xFA01ACBE };
Alexander Duyck06cf2662009-10-27 15:53:25 +00003134
3135 /* Fill out hash function seeds */
Alexander Duycka57fe232012-09-13 06:28:16 +00003136 for (j = 0; j < 10; j++)
3137 wr32(E1000_RSSRK(j), rsskey[j]);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003138
Alexander Duycka99955f2009-11-12 18:37:19 +00003139 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003140
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003141 switch (hw->mac.type) {
3142 case e1000_82575:
3143 shift = 6;
3144 break;
3145 case e1000_82576:
3146 /* 82576 supports 2 RSS queues for SR-IOV */
3147 if (adapter->vfs_allocated_count) {
Alexander Duyck06cf2662009-10-27 15:53:25 +00003148 shift = 3;
3149 num_rx_queues = 2;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003150 }
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003151 break;
3152 default:
3153 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003154 }
3155
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003156 /* Populate the indirection table 4 entries at a time. To do this
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003157 * we are generating the results for n and n+2 and then interleaving
3158 * those with the results for n+1 and n+3.
3159 */
3160 for (j = 0; j < 32; j++) {
3161 /* first pass generates n and n+2 */
3162 u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
3163 u32 reta = (base & 0x07800780) >> (7 - shift);
3164
3165 /* second pass generates n+1 and n+3 */
3166 base += 0x00010001 * num_rx_queues;
3167 reta |= (base & 0x07800780) << (1 + shift);
3168
3169 wr32(E1000_RETA(j), reta);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003170 }
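	/* Worked example: with num_rx_queues = 4 and shift = 0, register
	 * j = 8 covers RETA entries 32-35.  The first pass computes
	 * base = ((8 * 0x00040004) + 0x00020000) * 4 = 0x00880080, which
	 * masks and shifts down to 0x00010001; the second pass ORs in
	 * 0x01000100, so RETA(8) = 0x01010101 and entries 32-35 all select
	 * queue 1, spreading the 128 entries evenly across the four queues.
	 */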
3171
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003172 /* Disable raw packet checksumming so that RSS hash is placed in
Alexander Duyck06cf2662009-10-27 15:53:25 +00003173 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3174 * offloads as they are enabled by default
3175 */
3176 rxcsum = rd32(E1000_RXCSUM);
3177 rxcsum |= E1000_RXCSUM_PCSD;
3178
3179 if (adapter->hw.mac.type >= e1000_82576)
3180 /* Enable Receive Checksum Offload for SCTP */
3181 rxcsum |= E1000_RXCSUM_CRCOFL;
3182
3183 /* Don't need to set TUOFL or IPOFL, they default to 1 */
3184 wr32(E1000_RXCSUM, rxcsum);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003185
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003186 /* Generate RSS hash based on packet types, TCP/UDP
3187 * port numbers and/or IPv4/v6 src and dst addresses
3188 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003189 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3190 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3191 E1000_MRQC_RSS_FIELD_IPV6 |
3192 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3193 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003194
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003195 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3196 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3197 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3198 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3199
Alexander Duyck06cf2662009-10-27 15:53:25 +00003200 /* If VMDq is enabled then we set the appropriate mode for that, else
3201 * we default to RSS so that an RSS hash is calculated per packet even
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003202 * if we are only using one queue
3203 */
Alexander Duyck06cf2662009-10-27 15:53:25 +00003204 if (adapter->vfs_allocated_count) {
3205 if (hw->mac.type > e1000_82575) {
3206 /* Set the default pool for the PF's first queue */
3207 u32 vtctl = rd32(E1000_VT_CTL);
3208 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3209 E1000_VT_CTL_DISABLE_DEF_POOL);
3210 vtctl |= adapter->vfs_allocated_count <<
3211 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3212 wr32(E1000_VT_CTL, vtctl);
3213 }
Alexander Duycka99955f2009-11-12 18:37:19 +00003214 if (adapter->rss_queues > 1)
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003215 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003216 else
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003217 mrqc |= E1000_MRQC_ENABLE_VMDQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003218 } else {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003219 if (hw->mac.type != e1000_i211)
3220 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003221 }
3222 igb_vmm_control(adapter);
3223
Alexander Duyck06cf2662009-10-27 15:53:25 +00003224 wr32(E1000_MRQC, mrqc);
3225}
3226
3227/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003228 * igb_setup_rctl - configure the receive control registers
3229 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003230 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003231void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003232{
3233 struct e1000_hw *hw = &adapter->hw;
3234 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08003235
3236 rctl = rd32(E1000_RCTL);
3237
3238 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08003239 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08003240
Alexander Duyck69d728b2008-11-25 01:04:03 -08003241 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00003242 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08003243
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003244 /* enable stripping of CRC. It's unlikely this will break BMC
Auke Kok87cb7e82008-07-08 15:08:29 -07003245 * redirection as it did with e1000. Newer features require
3246 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003247 */
Auke Kok87cb7e82008-07-08 15:08:29 -07003248 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08003249
Alexander Duyck559e9c42009-10-27 23:52:50 +00003250 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08003251 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08003252
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003253 /* enable LPE to prevent packets larger than max_frame_size */
3254 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08003255
Alexander Duyck952f72a2009-10-27 15:51:07 +00003256 /* disable queue 0 to prevent tail write w/o re-config */
3257 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003258
Alexander Duycke1739522009-02-19 20:39:44 -08003259 /* Attention!!! For SR-IOV PF driver operations you must enable
3260 * queue drop for all VF and PF queues to prevent head-of-line blocking
3261 * if an untrusted VF does not provide descriptors to hardware.
3262 */
3263 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08003264 /* set all queue drop enable bits */
3265 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08003266 }
3267
Ben Greear89eaefb2012-03-06 09:41:58 +00003268 /* This is useful for sniffing bad packets. */
3269 if (adapter->netdev->features & NETIF_F_RXALL) {
3270 /* UPE and MPE will be handled by normal PROMISC logic
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003271 * in igb_set_rx_mode
3272 */
Ben Greear89eaefb2012-03-06 09:41:58 +00003273 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3274 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3275 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3276
3277 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3278 E1000_RCTL_DPF | /* Allow filtered pause */
3279 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3280 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3281 * and that breaks VLANs.
3282 */
3283 }
3284
Auke Kok9d5c8242008-01-24 02:22:38 -08003285 wr32(E1000_RCTL, rctl);
3286}
3287
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003288static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3289 int vfn)
3290{
3291 struct e1000_hw *hw = &adapter->hw;
3292 u32 vmolr;
3293
3294 /* if it isn't the PF, check to see if VFs are enabled and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003295 * increase the size to support vlan tags
3296 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003297 if (vfn < adapter->vfs_allocated_count &&
3298 adapter->vf_data[vfn].vlans_enabled)
3299 size += VLAN_TAG_SIZE;
3300
3301 vmolr = rd32(E1000_VMOLR(vfn));
3302 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3303 vmolr |= size | E1000_VMOLR_LPE;
3304 wr32(E1000_VMOLR(vfn), vmolr);
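	/* e.g. a VF with VLANs enabled and the default 1522-byte
	 * max_frame_size ends up with an RLPML of 1522 + 4 = 1526 bytes,
	 * assuming a 4-byte VLAN_TAG_SIZE.
	 */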
3305
3306 return 0;
3307}
3308
Auke Kok9d5c8242008-01-24 02:22:38 -08003309/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003310 * igb_rlpml_set - set maximum receive packet size
3311 * @adapter: board private structure
Alexander Duycke1739522009-02-19 20:39:44 -08003312 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003313 * Configure maximum receivable packet size.
Alexander Duycke1739522009-02-19 20:39:44 -08003314 **/
3315static void igb_rlpml_set(struct igb_adapter *adapter)
3316{
Alexander Duyck153285f2011-08-26 07:43:32 +00003317 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003318 struct e1000_hw *hw = &adapter->hw;
3319 u16 pf_id = adapter->vfs_allocated_count;
3320
Alexander Duycke1739522009-02-19 20:39:44 -08003321 if (pf_id) {
3322 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003323 /* If we're in VMDQ or SR-IOV mode, then set global RLPML
Alexander Duyck153285f2011-08-26 07:43:32 +00003324 * to our max jumbo frame size, in case we need to enable
3325 * jumbo frames on one of the rings later.
3326 * This will not pass over-length frames into the default
3327 * queue because it's gated by the VMOLR.RLPML.
3328 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003329 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003330 }
3331
3332 wr32(E1000_RLPML, max_frame_size);
3333}
3334
Williams, Mitch A8151d292010-02-10 01:44:24 +00003335static inline void igb_set_vmolr(struct igb_adapter *adapter,
3336 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003337{
3338 struct e1000_hw *hw = &adapter->hw;
3339 u32 vmolr;
3340
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003341 /* This register exists only on 82576 and newer, so on older hardware
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003342 * we should exit and do nothing
3343 */
3344 if (hw->mac.type < e1000_82576)
3345 return;
3346
3347 vmolr = rd32(E1000_VMOLR(vfn));
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003348 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003349 if (aupe)
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003350 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003351 else
3352 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003353
3354 /* clear all bits that might not be set */
3355 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3356
Alexander Duycka99955f2009-11-12 18:37:19 +00003357 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003358 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003359 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003360 * multicast packets
3361 */
3362 if (vfn <= adapter->vfs_allocated_count)
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003363 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003364
3365 wr32(E1000_VMOLR(vfn), vmolr);
3366}
3367
Alexander Duycke1739522009-02-19 20:39:44 -08003368/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003369 * igb_configure_rx_ring - Configure a receive ring after Reset
3370 * @adapter: board private structure
3371 * @ring: receive ring to be configured
Alexander Duyck85b430b2009-10-27 15:50:29 +00003372 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003373 * Configure the Rx unit of the MAC after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00003374 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003375void igb_configure_rx_ring(struct igb_adapter *adapter,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003376 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003377{
3378 struct e1000_hw *hw = &adapter->hw;
3379 u64 rdba = ring->dma;
3380 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003381 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003382
3383 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003384 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003385
3386 /* Set DMA base address registers */
3387 wr32(E1000_RDBAL(reg_idx),
3388 rdba & 0x00000000ffffffffULL);
3389 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3390 wr32(E1000_RDLEN(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003391 ring->count * sizeof(union e1000_adv_rx_desc));
Alexander Duyck85b430b2009-10-27 15:50:29 +00003392
3393 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003394 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003395 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003396 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003397
Alexander Duyck952f72a2009-10-27 15:51:07 +00003398 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003399 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyckde78d1f2012-09-25 00:31:12 +00003400 srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00003401 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
Alexander Duyck06218a82011-08-26 07:46:55 +00003402 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00003403 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003404 /* Only set Drop Enable if we are supporting multiple queues */
3405 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3406 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003407
3408 wr32(E1000_SRRCTL(reg_idx), srrctl);
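	/* Assuming the usual 2 KB receive data buffers and the 1 KB
	 * granularity of the BSIZEPKT field, the packet buffer size
	 * programmed above works out to 2, i.e. 2 KB per descriptor.
	 */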
3409
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003410 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003411 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003412
Alexander Duyck85b430b2009-10-27 15:50:29 +00003413 rxdctl |= IGB_RX_PTHRESH;
3414 rxdctl |= IGB_RX_HTHRESH << 8;
3415 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003416
3417 /* enable receive descriptor fetching */
3418 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003419 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3420}
3421
3422/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003423 * igb_configure_rx - Configure receive Unit after Reset
3424 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003425 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003426 * Configure the Rx unit of the MAC after a reset.
Auke Kok9d5c8242008-01-24 02:22:38 -08003427 **/
3428static void igb_configure_rx(struct igb_adapter *adapter)
3429{
Hannes Eder91075842009-02-18 19:36:04 -08003430 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003431
Alexander Duyck68d480c2009-10-05 06:33:08 +00003432 /* set UTA to appropriate mode */
3433 igb_set_uta(adapter);
3434
Alexander Duyck26ad9172009-10-05 06:32:49 +00003435 /* set the correct pool for the PF default MAC address in entry 0 */
3436 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003437 adapter->vfs_allocated_count);
Alexander Duyck26ad9172009-10-05 06:32:49 +00003438
Alexander Duyck06cf2662009-10-27 15:53:25 +00003439 /* Setup the HW Rx Head and Tail Descriptor Pointers and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003440 * the Base and Length of the Rx Descriptor Ring
3441 */
Alexander Duyckf9d40f62013-04-17 20:41:04 +00003442 for (i = 0; i < adapter->num_rx_queues; i++)
3443 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003444}
3445
3446/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003447 * igb_free_tx_resources - Free Tx Resources per Queue
3448 * @tx_ring: Tx descriptor ring for a specific queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003449 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003450 * Free all transmit software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003451 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003452void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003453{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003454 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003455
Alexander Duyck06034642011-08-26 07:44:22 +00003456 vfree(tx_ring->tx_buffer_info);
3457 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003458
Alexander Duyck439705e2009-10-27 23:49:20 +00003459 /* if not set, then don't free */
3460 if (!tx_ring->desc)
3461 return;
3462
Alexander Duyck59d71982010-04-27 13:09:25 +00003463 dma_free_coherent(tx_ring->dev, tx_ring->size,
3464 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003465
3466 tx_ring->desc = NULL;
3467}
3468
3469/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003470 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3471 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003472 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003473 * Free all transmit software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003474 **/
3475static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3476{
3477 int i;
3478
3479 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003480 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003481}
3482
Alexander Duyckebe42d12011-08-26 07:45:09 +00003483void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3484 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003485{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003486 if (tx_buffer->skb) {
3487 dev_kfree_skb_any(tx_buffer->skb);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003488 if (dma_unmap_len(tx_buffer, len))
Alexander Duyckebe42d12011-08-26 07:45:09 +00003489 dma_unmap_single(ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003490 dma_unmap_addr(tx_buffer, dma),
3491 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00003492 DMA_TO_DEVICE);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003493 } else if (dma_unmap_len(tx_buffer, len)) {
Alexander Duyckebe42d12011-08-26 07:45:09 +00003494 dma_unmap_page(ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003495 dma_unmap_addr(tx_buffer, dma),
3496 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00003497 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003498 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003499 tx_buffer->next_to_watch = NULL;
3500 tx_buffer->skb = NULL;
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003501 dma_unmap_len_set(tx_buffer, len, 0);
Alexander Duyckebe42d12011-08-26 07:45:09 +00003502 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003503}
3504
3505/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003506 * igb_clean_tx_ring - Free Tx Buffers
3507 * @tx_ring: ring to be cleaned
Auke Kok9d5c8242008-01-24 02:22:38 -08003508 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003509static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003510{
Alexander Duyck06034642011-08-26 07:44:22 +00003511 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003512 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003513 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003514
Alexander Duyck06034642011-08-26 07:44:22 +00003515 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003516 return;
3517 /* Free all the Tx ring sk_buffs */
3518
3519 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003520 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003521 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003522 }
3523
John Fastabenddad8a3b2012-04-23 12:22:39 +00003524 netdev_tx_reset_queue(txring_txq(tx_ring));
3525
Alexander Duyck06034642011-08-26 07:44:22 +00003526 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3527 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003528
3529 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003530 memset(tx_ring->desc, 0, tx_ring->size);
3531
3532 tx_ring->next_to_use = 0;
3533 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003534}
3535
3536/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003537 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3538 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003539 **/
3540static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3541{
3542 int i;
3543
3544 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003545 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003546}
3547
3548/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003549 * igb_free_rx_resources - Free Rx Resources
3550 * @rx_ring: ring to clean the resources from
Auke Kok9d5c8242008-01-24 02:22:38 -08003551 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003552 * Free all receive software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003553 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003554void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003555{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003556 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003557
Alexander Duyck06034642011-08-26 07:44:22 +00003558 vfree(rx_ring->rx_buffer_info);
3559 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003560
Alexander Duyck439705e2009-10-27 23:49:20 +00003561 /* if not set, then don't free */
3562 if (!rx_ring->desc)
3563 return;
3564
Alexander Duyck59d71982010-04-27 13:09:25 +00003565 dma_free_coherent(rx_ring->dev, rx_ring->size,
3566 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003567
3568 rx_ring->desc = NULL;
3569}
3570
3571/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003572 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3573 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003574 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003575 * Free all receive software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003576 **/
3577static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3578{
3579 int i;
3580
3581 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003582 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003583}
3584
3585/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003586 * igb_clean_rx_ring - Free Rx Buffers per Queue
3587 * @rx_ring: ring to free buffers from
Auke Kok9d5c8242008-01-24 02:22:38 -08003588 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003589static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003590{
Auke Kok9d5c8242008-01-24 02:22:38 -08003591 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003592 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003593
Alexander Duyck1a1c2252012-09-25 00:30:52 +00003594 if (rx_ring->skb)
3595 dev_kfree_skb(rx_ring->skb);
3596 rx_ring->skb = NULL;
3597
Alexander Duyck06034642011-08-26 07:44:22 +00003598 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003599 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003600
Auke Kok9d5c8242008-01-24 02:22:38 -08003601 	/* Free all the Rx ring page buffers */
3602 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003603 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003604
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003605 if (!buffer_info->page)
3606 continue;
3607
3608 dma_unmap_page(rx_ring->dev,
3609 buffer_info->dma,
3610 PAGE_SIZE,
3611 DMA_FROM_DEVICE);
3612 __free_page(buffer_info->page);
3613
Alexander Duyck1a1c2252012-09-25 00:30:52 +00003614 buffer_info->page = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003615 }
3616
Alexander Duyck06034642011-08-26 07:44:22 +00003617 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3618 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003619
3620 /* Zero out the descriptor ring */
3621 memset(rx_ring->desc, 0, rx_ring->size);
3622
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003623 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003624 rx_ring->next_to_clean = 0;
3625 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003626}
3627
3628/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003629 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3630 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003631 **/
3632static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3633{
3634 int i;
3635
3636 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003637 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003638}
3639
3640/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003641 * igb_set_mac - Change the Ethernet Address of the NIC
3642 * @netdev: network interface device structure
3643 * @p: pointer to an address structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003644 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003645 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003646 **/
3647static int igb_set_mac(struct net_device *netdev, void *p)
3648{
3649 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003650 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003651 struct sockaddr *addr = p;
3652
3653 if (!is_valid_ether_addr(addr->sa_data))
3654 return -EADDRNOTAVAIL;
3655
3656 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003657 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003658
Alexander Duyck26ad9172009-10-05 06:32:49 +00003659 /* set the correct pool for the new PF MAC address in entry 0 */
3660 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003661 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003662
Auke Kok9d5c8242008-01-24 02:22:38 -08003663 return 0;
3664}
3665
3666/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003667 * igb_write_mc_addr_list - write multicast addresses to MTA
3668 * @netdev: network interface device structure
Alexander Duyck68d480c2009-10-05 06:33:08 +00003669 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003670 * Writes multicast address list to the MTA hash table.
3671 * Returns: -ENOMEM on failure
3672 * 0 on no addresses written
3673 * X on writing X addresses to MTA
Alexander Duyck68d480c2009-10-05 06:33:08 +00003674 **/
3675static int igb_write_mc_addr_list(struct net_device *netdev)
3676{
3677 struct igb_adapter *adapter = netdev_priv(netdev);
3678 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003679 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003680 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003681 int i;
3682
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003683 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003684 /* nothing to program, so clear mc list */
3685 igb_update_mc_addr_list(hw, NULL, 0);
3686 igb_restore_vf_multicasts(adapter);
3687 return 0;
3688 }
3689
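	/* each entry in the packed list is one ETH_ALEN (6 byte) address */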
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003690 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003691 if (!mta_list)
3692 return -ENOMEM;
3693
Alexander Duyck68d480c2009-10-05 06:33:08 +00003694 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003695 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003696 netdev_for_each_mc_addr(ha, netdev)
3697 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003698
Alexander Duyck68d480c2009-10-05 06:33:08 +00003699 igb_update_mc_addr_list(hw, mta_list, i);
3700 kfree(mta_list);
3701
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003702 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003703}
3704
3705/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003706 * igb_write_uc_addr_list - write unicast addresses to RAR table
3707 * @netdev: network interface device structure
Alexander Duyck68d480c2009-10-05 06:33:08 +00003708 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003709 * Writes unicast address list to the RAR table.
3710 * Returns: -ENOMEM on failure/insufficient address space
3711 * 0 on no addresses written
3712 * X on writing X addresses to the RAR table
Alexander Duyck68d480c2009-10-05 06:33:08 +00003713 **/
3714static int igb_write_uc_addr_list(struct net_device *netdev)
3715{
3716 struct igb_adapter *adapter = netdev_priv(netdev);
3717 struct e1000_hw *hw = &adapter->hw;
3718 unsigned int vfn = adapter->vfs_allocated_count;
3719 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
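	/* (RAR entry 0 holds the PF MAC and one entry is reserved per VF,
	 * so only the remaining slots are free for extra unicast filters)
	 */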
3720 int count = 0;
3721
3722 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003723 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003724 return -ENOMEM;
3725
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003726 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003727 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003728
3729 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003730 if (!rar_entries)
3731 break;
3732 igb_rar_set_qsel(adapter, ha->addr,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003733 rar_entries--,
3734 vfn);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003735 count++;
3736 }
3737 }
3738 /* write the addresses in reverse order to avoid write combining */
3739 for (; rar_entries > 0 ; rar_entries--) {
3740 wr32(E1000_RAH(rar_entries), 0);
3741 wr32(E1000_RAL(rar_entries), 0);
3742 }
3743 wrfl();
3744
3745 return count;
3746}
3747
3748/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003749 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3750 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003751 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003752 * The set_rx_mode entry point is called whenever the unicast or multicast
3753 * address lists or the network interface flags are updated. This routine is
3754 * responsible for configuring the hardware for proper unicast, multicast,
3755 * promiscuous mode, and all-multi behavior.
Auke Kok9d5c8242008-01-24 02:22:38 -08003756 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003757static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003758{
3759 struct igb_adapter *adapter = netdev_priv(netdev);
3760 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003761 unsigned int vfn = adapter->vfs_allocated_count;
3762 u32 rctl, vmolr = 0;
3763 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003764
3765 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003766 rctl = rd32(E1000_RCTL);
3767
Alexander Duyck68d480c2009-10-05 06:33:08 +00003768 	/* clear the affected bits */
3769 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3770
Patrick McHardy746b9f02008-07-16 20:15:45 -07003771 if (netdev->flags & IFF_PROMISC) {
Greg Rose6f3dc3192013-03-26 06:19:41 +00003772 /* retain VLAN HW filtering if in VT mode */
Emil Tantilov7e448922013-07-26 05:46:36 -07003773 if (adapter->vfs_allocated_count)
Greg Rose6f3dc3192013-03-26 06:19:41 +00003774 rctl |= E1000_RCTL_VFE;
Auke Kok9d5c8242008-01-24 02:22:38 -08003775 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003776 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003777 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003778 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003779 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003780 vmolr |= E1000_VMOLR_MPME;
3781 } else {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003782 			/* Write addresses to the MTA; if the attempt fails,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003783 			 * fall back to multicast promiscuous mode so that we
Alexander Duyck68d480c2009-10-05 06:33:08 +00003784 			 * can at least receive multicast traffic
3785 */
3786 count = igb_write_mc_addr_list(netdev);
3787 if (count < 0) {
3788 rctl |= E1000_RCTL_MPE;
3789 vmolr |= E1000_VMOLR_MPME;
3790 } else if (count) {
3791 vmolr |= E1000_VMOLR_ROMPE;
3792 }
3793 }
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003794 		/* Write addresses to available RAR registers; if there is
Alexander Duyck68d480c2009-10-05 06:33:08 +00003795 		 * not enough space to store them all, enable unicast
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003796 		 * promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003797 */
3798 count = igb_write_uc_addr_list(netdev);
3799 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003800 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003801 vmolr |= E1000_VMOLR_ROPE;
3802 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003803 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003804 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003805 wr32(E1000_RCTL, rctl);
3806
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003807 	/* To support SR-IOV and eventually VMDq it is necessary to set
Alexander Duyck68d480c2009-10-05 06:33:08 +00003808 	 * the VMOLR to enable the appropriate modes.  Without this workaround
3809 	 * VLAN tags are not stripped from frames that are only arriving
3810 	 * because we are the default pool
3811 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003812 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003813 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003814
Alexander Duyck68d480c2009-10-05 06:33:08 +00003815 vmolr |= rd32(E1000_VMOLR(vfn)) &
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003816 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003817 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003818 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003819}
3820
Greg Rose13800462010-11-06 02:08:26 +00003821static void igb_check_wvbr(struct igb_adapter *adapter)
3822{
3823 struct e1000_hw *hw = &adapter->hw;
3824 u32 wvbr = 0;
3825
3826 switch (hw->mac.type) {
3827 case e1000_82576:
3828 case e1000_i350:
3829 		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
3830 			return;
3831 break;
3832 default:
3833 break;
3834 }
3835
3836 adapter->wvbr |= wvbr;
3837}
3838
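/* WVBR reports spoof events per queue; the bits for a given VF sit 8 bit
 * positions apart, so igb_spoof_check() below tests bit j and
 * bit (j + IGB_STAGGERED_QUEUE_OFFSET) for each VF.
 */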
3839#define IGB_STAGGERED_QUEUE_OFFSET 8
3840
3841static void igb_spoof_check(struct igb_adapter *adapter)
3842{
3843 int j;
3844
3845 if (!adapter->wvbr)
3846 return;
3847
3848 	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3849 if (adapter->wvbr & (1 << j) ||
3850 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3851 dev_warn(&adapter->pdev->dev,
3852 "Spoof event(s) detected on VF %d\n", j);
3853 adapter->wvbr &=
3854 ~((1 << j) |
3855 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3856 }
3857 }
3858}
3859
Auke Kok9d5c8242008-01-24 02:22:38 -08003860/* Need to wait a few seconds after link up to get diagnostic information from
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003861 * the phy
3862 */
Auke Kok9d5c8242008-01-24 02:22:38 -08003863static void igb_update_phy_info(unsigned long data)
3864{
3865 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003866 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003867}
3868
3869/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003870 * igb_has_link - check shared code for link and determine up/down
3871 * @adapter: pointer to driver private info
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003872 **/
Nick Nunley31455352010-02-17 01:01:21 +00003873bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003874{
3875 struct e1000_hw *hw = &adapter->hw;
3876 bool link_active = false;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003877
3878 	/* get_link_status is set on LSC (link status) interrupt or
3879 	 * rx sequence error interrupt. get_link_status will stay
3880 	 * set until the e1000_check_for_link establishes link
3881 	 * for copper adapters ONLY
3882 */
3883 switch (hw->phy.media_type) {
3884 case e1000_media_type_copper:
Akeem G Abodunrine5c33702013-06-06 01:31:09 +00003885 if (!hw->mac.get_link_status)
3886 return true;
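		/* fall through */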
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003887 case e1000_media_type_internal_serdes:
Akeem G Abodunrine5c33702013-06-06 01:31:09 +00003888 hw->mac.ops.check_for_link(hw);
3889 link_active = !hw->mac.get_link_status;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003890 break;
3891 default:
3892 case e1000_media_type_unknown:
3893 break;
3894 }
3895
3896 return link_active;
3897}
3898
Stefan Assmann563988d2011-04-05 04:27:15 +00003899static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3900{
3901 bool ret = false;
3902 u32 ctrl_ext, thstat;
3903
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003904 /* check for thermal sensor event on i350 copper only */
Stefan Assmann563988d2011-04-05 04:27:15 +00003905 if (hw->mac.type == e1000_i350) {
3906 thstat = rd32(E1000_THSTAT);
3907 ctrl_ext = rd32(E1000_CTRL_EXT);
3908
3909 if ((hw->phy.media_type == e1000_media_type_copper) &&
Akeem G. Abodunrin5c17a202013-01-29 10:15:31 +00003910 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
Stefan Assmann563988d2011-04-05 04:27:15 +00003911 ret = !!(thstat & event);
Stefan Assmann563988d2011-04-05 04:27:15 +00003912 }
3913
3914 return ret;
3915}
3916
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003917/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003918 * igb_watchdog - Timer Call-back
3919 * @data: pointer to adapter cast into an unsigned long
Auke Kok9d5c8242008-01-24 02:22:38 -08003920 **/
3921static void igb_watchdog(unsigned long data)
3922{
3923 struct igb_adapter *adapter = (struct igb_adapter *)data;
3924 /* Do the rest outside of interrupt context */
3925 schedule_work(&adapter->watchdog_task);
3926}
3927
3928static void igb_watchdog_task(struct work_struct *work)
3929{
3930 struct igb_adapter *adapter = container_of(work,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003931 struct igb_adapter,
3932 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003933 struct e1000_hw *hw = &adapter->hw;
Koki Sanagic0ba4772013-01-16 11:05:53 +00003934 struct e1000_phy_info *phy = &hw->phy;
Auke Kok9d5c8242008-01-24 02:22:38 -08003935 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003936 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003937 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003938
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003939 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003940 if (link) {
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003941 /* Cancel scheduled suspend requests. */
3942 pm_runtime_resume(netdev->dev.parent);
3943
Auke Kok9d5c8242008-01-24 02:22:38 -08003944 if (!netif_carrier_ok(netdev)) {
3945 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003946 hw->mac.ops.get_speed_and_duplex(hw,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003947 &adapter->link_speed,
3948 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003949
3950 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003951 		/* Link status message must follow this format */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003952 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3953 "Duplex, Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003954 netdev->name,
3955 adapter->link_speed,
3956 adapter->link_duplex == FULL_DUPLEX ?
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003957 "Full" : "Half",
3958 (ctrl & E1000_CTRL_TFCE) &&
3959 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3960 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3961 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
Auke Kok9d5c8242008-01-24 02:22:38 -08003962
Koki Sanagic0ba4772013-01-16 11:05:53 +00003963 /* check if SmartSpeed worked */
3964 igb_check_downshift(hw);
3965 if (phy->speed_downgraded)
3966 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
3967
Stefan Assmann563988d2011-04-05 04:27:15 +00003968 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003969 if (igb_thermal_sensor_event(hw,
3970 E1000_THSTAT_LINK_THROTTLE)) {
3971 netdev_info(netdev, "The network adapter link "
3972 "speed was downshifted because it "
3973 "overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003974 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003975
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003976 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003977 adapter->tx_timeout_factor = 1;
3978 switch (adapter->link_speed) {
3979 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003980 adapter->tx_timeout_factor = 14;
3981 break;
3982 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003983 /* maybe add some timeout factor ? */
3984 break;
3985 }
3986
3987 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003988
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003989 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003990 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003991
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003992 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003993 if (!test_bit(__IGB_DOWN, &adapter->state))
3994 mod_timer(&adapter->phy_info_timer,
3995 round_jiffies(jiffies + 2 * HZ));
3996 }
3997 } else {
3998 if (netif_carrier_ok(netdev)) {
3999 adapter->link_speed = 0;
4000 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00004001
4002 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00004003 if (igb_thermal_sensor_event(hw,
4004 E1000_THSTAT_PWR_DOWN)) {
4005 netdev_err(netdev, "The network adapter was "
4006 "stopped because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00004007 }
Stefan Assmann563988d2011-04-05 04:27:15 +00004008
Alexander Duyck527d47c2008-11-27 00:21:39 -08004009 		/* Link status message must follow this format */
4010 printk(KERN_INFO "igb: %s NIC Link is Down\n",
4011 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08004012 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00004013
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004014 igb_ping_all_vfs(adapter);
4015
Alexander Duyck4b1a9872009-02-06 23:19:50 +00004016 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08004017 if (!test_bit(__IGB_DOWN, &adapter->state))
4018 mod_timer(&adapter->phy_info_timer,
4019 round_jiffies(jiffies + 2 * HZ));
Yan, Zheng749ab2c2012-01-04 20:23:37 +00004020
4021 pm_schedule_suspend(netdev->dev.parent,
4022 MSEC_PER_SEC * 5);
Auke Kok9d5c8242008-01-24 02:22:38 -08004023 }
4024 }
4025
Eric Dumazet12dcd862010-10-15 17:27:10 +00004026 spin_lock(&adapter->stats64_lock);
4027 igb_update_stats(adapter, &adapter->stats64);
4028 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08004029
Alexander Duyckdbabb062009-11-12 18:38:16 +00004030 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004031 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00004032 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004033 /* We've lost link, so the controller stops DMA,
4034 * but we've got queued Tx work that's never going
4035 * to get done, so reset controller to flush Tx.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004036 * (Do the reset outside of interrupt context).
4037 */
Alexander Duyckdbabb062009-11-12 18:38:16 +00004038 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
4039 adapter->tx_timeout_count++;
4040 schedule_work(&adapter->reset_task);
4041 /* return immediately since reset is imminent */
4042 return;
4043 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004044 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004045
Alexander Duyckdbabb062009-11-12 18:38:16 +00004046 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00004047 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00004048 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004049
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004050 /* Cause software interrupt to ensure Rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07004051 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00004052 u32 eics = 0;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00004053 for (i = 0; i < adapter->num_q_vectors; i++)
4054 eics |= adapter->q_vector[i]->eims_value;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07004055 wr32(E1000_EICS, eics);
4056 } else {
4057 wr32(E1000_ICS, E1000_ICS_RXDMT0);
4058 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004059
Greg Rose13800462010-11-06 02:08:26 +00004060 igb_spoof_check(adapter);
Matthew Vickfc580752012-12-13 07:20:35 +00004061 igb_ptp_rx_hang(adapter);
Greg Rose13800462010-11-06 02:08:26 +00004062
Auke Kok9d5c8242008-01-24 02:22:38 -08004063 /* Reset the timer */
4064 if (!test_bit(__IGB_DOWN, &adapter->state))
4065 mod_timer(&adapter->watchdog_timer,
4066 round_jiffies(jiffies + 2 * HZ));
4067}
4068
4069enum latency_range {
4070 lowest_latency = 0,
4071 low_latency = 1,
4072 bulk_latency = 2,
4073 latency_invalid = 255
4074};
4075
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004076/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004077 * igb_update_ring_itr - update the dynamic ITR value based on packet size
4078 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004079 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004080 * Stores a new ITR value based strictly on packet size. This
4081 * algorithm is less sophisticated than that used in igb_update_itr,
4082 * due to the difficulty of synchronizing statistics across multiple
4083 * receive rings. The divisors and thresholds used by this function
4084 * were determined based on theoretical maximum wire speed and testing
4085 * data, in order to minimize response time while increasing bulk
4086 * throughput.
4087 * This functionality is controlled by the InterruptThrottleRate module
4088 * parameter (see igb_param.c)
4089 * NOTE: This function is called only when operating in a multiqueue
4090 * receive environment.
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004091 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00004092static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004093{
Alexander Duyck047e0032009-10-27 15:49:27 +00004094 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004095 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00004096 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004097 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004098
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004099 /* For non-gigabit speeds, just fix the interrupt rate at 4000
4100 * ints/sec - ITR timer value of 120 ticks.
4101 */
4102 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004103 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004104 goto set_itr_val;
4105 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004106
Alexander Duyck0ba82992011-08-26 07:45:47 +00004107 packets = q_vector->rx.total_packets;
4108 if (packets)
4109 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004110
Alexander Duyck0ba82992011-08-26 07:45:47 +00004111 packets = q_vector->tx.total_packets;
4112 if (packets)
4113 avg_wire_size = max_t(u32, avg_wire_size,
4114 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00004115
4116 /* if avg_wire_size isn't set no work was done */
4117 if (!avg_wire_size)
4118 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004119
4120 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4121 avg_wire_size += 24;
4122
4123 /* Don't starve jumbo frames */
4124 avg_wire_size = min(avg_wire_size, 3000);
4125
4126 /* Give a little boost to mid-size frames */
4127 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
4128 new_val = avg_wire_size / 3;
4129 else
4130 new_val = avg_wire_size / 2;
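	/* e.g. a stream of ~1514 byte frames gives avg_wire_size ~1538 after
	 * the +24 adjustment and new_val ~769, while ~60 byte frames give ~42,
	 * which the conservative-mode check below bumps up to IGB_20K_ITR
	 */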
4131
Alexander Duyck0ba82992011-08-26 07:45:47 +00004132 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4133 if (new_val < IGB_20K_ITR &&
4134 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4135 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4136 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00004137
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004138set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00004139 if (new_val != q_vector->itr_val) {
4140 q_vector->itr_val = new_val;
4141 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004142 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004143clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004144 q_vector->rx.total_bytes = 0;
4145 q_vector->rx.total_packets = 0;
4146 q_vector->tx.total_bytes = 0;
4147 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004148}
4149
4150/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004151 * igb_update_itr - update the dynamic ITR value based on statistics
4152 * @q_vector: pointer to q_vector
4153 * @ring_container: ring info to update the itr for
4154 *
4155 * Stores a new ITR value based on packets and byte
4156 * counts during the last interrupt. The advantage of per interrupt
4157 * computation is faster updates and more accurate ITR for the current
4158 * traffic pattern. Constants in this function were computed
4159 * based on theoretical maximum wire speed and thresholds were set based
4160 * on testing data as well as attempting to minimize response time
4161 * while increasing bulk throughput.
4162 * this functionality is controlled by the InterruptThrottleRate module
4163 * parameter (see igb_param.c)
4164 * NOTE: These calculations are only valid when operating in a single-
4165 * queue environment.
Auke Kok9d5c8242008-01-24 02:22:38 -08004166 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00004167static void igb_update_itr(struct igb_q_vector *q_vector,
4168 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08004169{
Alexander Duyck0ba82992011-08-26 07:45:47 +00004170 unsigned int packets = ring_container->total_packets;
4171 unsigned int bytes = ring_container->total_bytes;
4172 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08004173
Alexander Duyck0ba82992011-08-26 07:45:47 +00004174 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08004175 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00004176 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08004177
Alexander Duyck0ba82992011-08-26 07:45:47 +00004178 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004179 case lowest_latency:
4180 /* handle TSO and jumbo frames */
4181 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00004182 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004183 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00004184 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004185 break;
4186 case low_latency: /* 50 usec aka 20000 ints/s */
4187 if (bytes > 10000) {
4188 /* this if handles the TSO accounting */
4189 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004190 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004191 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004192 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004193 } else if ((packets > 35)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004194 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004195 }
4196 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004197 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004198 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004199 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004200 }
4201 break;
4202 case bulk_latency: /* 250 usec aka 4000 ints/s */
4203 if (bytes > 25000) {
4204 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00004205 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00004206 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004207 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004208 }
4209 break;
4210 }
4211
Alexander Duyck0ba82992011-08-26 07:45:47 +00004212 /* clear work counters since we have the values we need */
4213 ring_container->total_bytes = 0;
4214 ring_container->total_packets = 0;
4215
4216 /* write updated itr to ring container */
4217 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08004218}
4219
Alexander Duyck0ba82992011-08-26 07:45:47 +00004220static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004221{
Alexander Duyck0ba82992011-08-26 07:45:47 +00004222 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004223 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00004224 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004225
4226 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4227 if (adapter->link_speed != SPEED_1000) {
4228 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00004229 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08004230 goto set_itr_now;
4231 }
4232
Alexander Duyck0ba82992011-08-26 07:45:47 +00004233 igb_update_itr(q_vector, &q_vector->tx);
4234 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004235
Alexander Duyck0ba82992011-08-26 07:45:47 +00004236 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08004237
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004238 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00004239 if (current_itr == lowest_latency &&
4240 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4241 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004242 current_itr = low_latency;
4243
Auke Kok9d5c8242008-01-24 02:22:38 -08004244 switch (current_itr) {
4245 /* counts and packets in update_itr are dependent on these numbers */
4246 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004247 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08004248 break;
4249 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004250 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08004251 break;
4252 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004253 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08004254 break;
4255 default:
4256 break;
4257 }
4258
4259set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00004260 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004261 /* this attempts to bias the interrupt rate towards Bulk
4262 * by adding intermediate steps when interrupt rate is
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004263 * increasing
4264 */
Alexander Duyck047e0032009-10-27 15:49:27 +00004265 new_itr = new_itr > q_vector->itr_val ?
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004266 max((new_itr * q_vector->itr_val) /
4267 (new_itr + (q_vector->itr_val >> 2)),
4268 new_itr) : new_itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08004269 /* Don't write the value here; it resets the adapter's
4270 * internal timer, and causes us to delay far longer than
4271 * we should between interrupts. Instead, we write the ITR
4272 * value at the beginning of the next interrupt so the timing
4273 * ends up being correct.
4274 */
Alexander Duyck047e0032009-10-27 15:49:27 +00004275 q_vector->itr_val = new_itr;
4276 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004277 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004278}
4279
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00004280static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4281 u32 type_tucmd, u32 mss_l4len_idx)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004282{
4283 struct e1000_adv_tx_context_desc *context_desc;
4284 u16 i = tx_ring->next_to_use;
4285
4286 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4287
4288 i++;
4289 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4290
4291 /* set bits to identify this as an advanced context descriptor */
4292 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4293
4294 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004295 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004296 mss_l4len_idx |= tx_ring->reg_idx << 4;
4297
4298 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4299 context_desc->seqnum_seed = 0;
4300 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4301 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4302}
4303
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004304static int igb_tso(struct igb_ring *tx_ring,
4305 struct igb_tx_buffer *first,
4306 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004307{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004308 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004309 u32 vlan_macip_lens, type_tucmd;
4310 u32 mss_l4len_idx, l4len;
4311
Alexander Duycked6aa102012-11-13 04:03:22 +00004312 if (skb->ip_summed != CHECKSUM_PARTIAL)
4313 return 0;
4314
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004315 if (!skb_is_gso(skb))
4316 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004317
4318 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004319 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004320 if (err)
4321 return err;
4322 }
4323
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004324 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4325 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004326
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004327 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004328 struct iphdr *iph = ip_hdr(skb);
4329 iph->tot_len = 0;
4330 iph->check = 0;
4331 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4332 iph->daddr, 0,
4333 IPPROTO_TCP,
4334 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004335 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004336 first->tx_flags |= IGB_TX_FLAGS_TSO |
4337 IGB_TX_FLAGS_CSUM |
4338 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004339 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004340 ipv6_hdr(skb)->payload_len = 0;
4341 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4342 &ipv6_hdr(skb)->daddr,
4343 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004344 first->tx_flags |= IGB_TX_FLAGS_TSO |
4345 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004346 }
4347
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004348 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004349 l4len = tcp_hdrlen(skb);
4350 *hdr_len = skb_transport_offset(skb) + l4len;
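	/* hdr_len is the full MAC + IP + TCP header length that gets
	 * replicated in front of every segment the hardware produces
	 */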
Auke Kok9d5c8242008-01-24 02:22:38 -08004351
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004352 /* update gso size and bytecount with header size */
4353 first->gso_segs = skb_shinfo(skb)->gso_segs;
4354 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4355
Auke Kok9d5c8242008-01-24 02:22:38 -08004356 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004357 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4358 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004359
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004360 /* VLAN MACLEN IPLEN */
4361 vlan_macip_lens = skb_network_header_len(skb);
4362 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004363 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004364
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004365 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004366
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004367 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004368}
4369
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004370static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004371{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004372 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004373 u32 vlan_macip_lens = 0;
4374 u32 mss_l4len_idx = 0;
4375 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004376
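	/* a context descriptor is only needed for checksum offload or when a
	 * VLAN tag has to be inserted
	 */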
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004377 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004378 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4379 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004380 } else {
4381 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004382 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004383 case __constant_htons(ETH_P_IP):
4384 vlan_macip_lens |= skb_network_header_len(skb);
4385 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4386 l4_hdr = ip_hdr(skb)->protocol;
4387 break;
4388 case __constant_htons(ETH_P_IPV6):
4389 vlan_macip_lens |= skb_network_header_len(skb);
4390 l4_hdr = ipv6_hdr(skb)->nexthdr;
4391 break;
4392 default:
4393 if (unlikely(net_ratelimit())) {
4394 dev_warn(tx_ring->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004395 "partial checksum but proto=%x!\n",
4396 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004397 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004398 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004399 }
4400
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004401 switch (l4_hdr) {
4402 case IPPROTO_TCP:
4403 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4404 mss_l4len_idx = tcp_hdrlen(skb) <<
4405 E1000_ADVTXD_L4LEN_SHIFT;
4406 break;
4407 case IPPROTO_SCTP:
4408 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4409 mss_l4len_idx = sizeof(struct sctphdr) <<
4410 E1000_ADVTXD_L4LEN_SHIFT;
4411 break;
4412 case IPPROTO_UDP:
4413 mss_l4len_idx = sizeof(struct udphdr) <<
4414 E1000_ADVTXD_L4LEN_SHIFT;
4415 break;
4416 default:
4417 if (unlikely(net_ratelimit())) {
4418 dev_warn(tx_ring->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004419 "partial checksum but l4 proto=%x!\n",
4420 l4_hdr);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004421 }
4422 break;
4423 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004424
4425 /* update TX checksum flag */
4426 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004427 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004428
4429 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004430 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004431
4432 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004433}
4434
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004435#define IGB_SET_FLAG(_input, _flag, _result) \
4436 ((_flag <= _result) ? \
4437 ((u32)(_input & _flag) * (_result / _flag)) : \
4438 ((u32)(_input & _flag) / (_flag / _result)))
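/* IGB_SET_FLAG() maps the _flag bit of _input onto the _result bit
 * position; with power-of-two masks the multiply/divide folds to a shift,
 * e.g. IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, E1000_ADVTXD_DCMD_VLE)
 * evaluates to E1000_ADVTXD_DCMD_VLE when the VLAN flag is set, else 0.
 */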
4439
4440static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
Alexander Duycke032afc2011-08-26 07:44:48 +00004441{
4442 /* set type for advanced descriptor with frame checksum insertion */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004443 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
4444 E1000_ADVTXD_DCMD_DEXT |
4445 E1000_ADVTXD_DCMD_IFCS;
Alexander Duycke032afc2011-08-26 07:44:48 +00004446
4447 /* set HW vlan bit if vlan is present */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004448 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
4449 (E1000_ADVTXD_DCMD_VLE));
Alexander Duycke032afc2011-08-26 07:44:48 +00004450
4451 /* set segmentation bits for TSO */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004452 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
4453 (E1000_ADVTXD_DCMD_TSE));
4454
4455 /* set timestamp bit if present */
4456 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
4457 (E1000_ADVTXD_MAC_TSTAMP));
4458
4459 	/* drop frame checksum insertion (IFCS) if the skb requested no FCS */
4460 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
Alexander Duycke032afc2011-08-26 07:44:48 +00004461
4462 return cmd_type;
4463}
4464
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004465static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4466 union e1000_adv_tx_desc *tx_desc,
4467 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004468{
4469 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4470
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004471 /* 82575 requires a unique index per ring */
4472 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004473 olinfo_status |= tx_ring->reg_idx << 4;
4474
4475 /* insert L4 checksum */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004476 olinfo_status |= IGB_SET_FLAG(tx_flags,
4477 IGB_TX_FLAGS_CSUM,
4478 (E1000_TXD_POPTS_TXSM << 8));
Alexander Duycke032afc2011-08-26 07:44:48 +00004479
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004480 /* insert IPv4 checksum */
4481 olinfo_status |= IGB_SET_FLAG(tx_flags,
4482 IGB_TX_FLAGS_IPV4,
4483 (E1000_TXD_POPTS_IXSM << 8));
Alexander Duycke032afc2011-08-26 07:44:48 +00004484
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004485 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004486}
4487
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004488static void igb_tx_map(struct igb_ring *tx_ring,
4489 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004490 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004491{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004492 struct sk_buff *skb = first->skb;
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00004493 struct igb_tx_buffer *tx_buffer;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004494 union e1000_adv_tx_desc *tx_desc;
Alexander Duyck80d07592012-11-13 04:03:24 +00004495 struct skb_frag_struct *frag;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004496 dma_addr_t dma;
Alexander Duyck80d07592012-11-13 04:03:24 +00004497 unsigned int data_len, size;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004498 u32 tx_flags = first->tx_flags;
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004499 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004500 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004501
4502 tx_desc = IGB_TX_DESC(tx_ring, i);
4503
Alexander Duyck80d07592012-11-13 04:03:24 +00004504 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
4505
4506 size = skb_headlen(skb);
4507 data_len = skb->data_len;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004508
4509 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08004510
Alexander Duyck80d07592012-11-13 04:03:24 +00004511 tx_buffer = first;
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004512
Alexander Duyck80d07592012-11-13 04:03:24 +00004513 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
4514 if (dma_mapping_error(tx_ring->dev, dma))
4515 goto dma_error;
4516
4517 /* record length, and DMA address */
4518 dma_unmap_len_set(tx_buffer, len, size);
4519 dma_unmap_addr_set(tx_buffer, dma, dma);
4520
4521 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4522
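		/* a buffer larger than IGB_MAX_DATA_PER_TXD has to be split
		 * across several data descriptors
		 */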
Alexander Duyckebe42d12011-08-26 07:45:09 +00004523 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4524 tx_desc->read.cmd_type_len =
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004525 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004526
Alexander Duyckebe42d12011-08-26 07:45:09 +00004527 i++;
4528 tx_desc++;
4529 if (i == tx_ring->count) {
4530 tx_desc = IGB_TX_DESC(tx_ring, 0);
4531 i = 0;
4532 }
Alexander Duyck80d07592012-11-13 04:03:24 +00004533 tx_desc->read.olinfo_status = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004534
4535 dma += IGB_MAX_DATA_PER_TXD;
4536 size -= IGB_MAX_DATA_PER_TXD;
4537
Alexander Duyckebe42d12011-08-26 07:45:09 +00004538 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4539 }
4540
4541 if (likely(!data_len))
4542 break;
4543
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004544 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004545
Alexander Duyck65689fe2009-03-20 00:17:43 +00004546 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004547 tx_desc++;
4548 if (i == tx_ring->count) {
4549 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004550 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004551 }
Alexander Duyck80d07592012-11-13 04:03:24 +00004552 tx_desc->read.olinfo_status = 0;
Alexander Duyck65689fe2009-03-20 00:17:43 +00004553
Eric Dumazet9e903e02011-10-18 21:00:24 +00004554 size = skb_frag_size(frag);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004555 data_len -= size;
4556
4557 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
Alexander Duyck80d07592012-11-13 04:03:24 +00004558 size, DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00004559
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00004560 tx_buffer = &tx_ring->tx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08004561 }
4562
Alexander Duyckebe42d12011-08-26 07:45:09 +00004563 /* write last descriptor with RS and EOP bits */
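	/* (RS requests a status write-back on completion, EOP marks the last
	 * buffer of the frame)
	 */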
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004564 cmd_type |= size | IGB_TXD_DCMD;
4565 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
Alexander Duyck8542db02011-08-26 07:44:43 +00004566
Alexander Duyck80d07592012-11-13 04:03:24 +00004567 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4568
Alexander Duyck8542db02011-08-26 07:44:43 +00004569 /* set the timestamp */
4570 first->time_stamp = jiffies;
4571
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004572 /* Force memory writes to complete before letting h/w know there
Alexander Duyckebe42d12011-08-26 07:45:09 +00004573 * are new descriptors to fetch. (Only applicable for weak-ordered
4574 * memory model archs, such as IA-64).
4575 *
4576 * We also need this memory barrier to make certain all of the
4577 * status bits have been updated before next_to_watch is written.
4578 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004579 wmb();
4580
Alexander Duyckebe42d12011-08-26 07:45:09 +00004581 /* set next_to_watch value indicating a packet is present */
4582 first->next_to_watch = tx_desc;
4583
4584 i++;
4585 if (i == tx_ring->count)
4586 i = 0;
4587
Auke Kok9d5c8242008-01-24 02:22:38 -08004588 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004589
Alexander Duyckfce99e32009-10-27 15:51:27 +00004590 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004591
Auke Kok9d5c8242008-01-24 02:22:38 -08004592 	/* We need this if more than one processor can write to our tail
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004593 	 * at a time; it synchronizes IO on IA64/Altix systems
4594 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004595 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004596
4597 return;
4598
4599dma_error:
4600 dev_err(tx_ring->dev, "TX DMA map failed\n");
4601
4602 /* clear dma mappings for failed tx_buffer_info map */
4603 for (;;) {
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00004604 tx_buffer = &tx_ring->tx_buffer_info[i];
4605 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
4606 if (tx_buffer == first)
Alexander Duyckebe42d12011-08-26 07:45:09 +00004607 break;
4608 if (i == 0)
4609 i = tx_ring->count;
4610 i--;
4611 }
4612
4613 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004614}
4615
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004616static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004617{
Alexander Duycke694e962009-10-27 15:53:06 +00004618 struct net_device *netdev = tx_ring->netdev;
4619
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004620 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004621
Auke Kok9d5c8242008-01-24 02:22:38 -08004622 /* Herbert's original patch had:
4623 * smp_mb__after_netif_stop_queue();
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004624 * but since that doesn't exist yet, just open code it.
4625 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004626 smp_mb();
4627
4628 	/* We need to check again in case another CPU has just
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004629 * made room available.
4630 */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004631 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004632 return -EBUSY;
4633
4634 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004635 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004636
4637 u64_stats_update_begin(&tx_ring->tx_syncp2);
4638 tx_ring->tx_stats.restart_queue2++;
4639 u64_stats_update_end(&tx_ring->tx_syncp2);
4640
Auke Kok9d5c8242008-01-24 02:22:38 -08004641 return 0;
4642}
4643
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004644static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004645{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004646 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004647 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004648 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004649}
4650
Alexander Duyckcd392f52011-08-26 07:43:59 +00004651netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4652 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004653{
Alexander Duyck8542db02011-08-26 07:44:43 +00004654 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004655 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004656 u32 tx_flags = 0;
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004657 u16 count = TXD_USE_COUNT(skb_headlen(skb));
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004658 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004659 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004660
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004661 /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
4662 * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
Auke Kok9d5c8242008-01-24 02:22:38 -08004663 * + 2 desc gap to keep tail from touching head,
Auke Kok9d5c8242008-01-24 02:22:38 -08004664 * + 1 desc for context descriptor,
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004665 * otherwise try next time
4666 */
4667 if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
4668 unsigned short f;
4669 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4670 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4671 } else {
4672 count += skb_shinfo(skb)->nr_frags;
4673 }
4674
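	/* count covers the data descriptors; the +3 is the context descriptor
	 * plus the two descriptor gap called out above
	 */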
4675 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004676 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004677 return NETDEV_TX_BUSY;
4678 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004679
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004680 /* record the location of the first descriptor for this packet */
4681 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4682 first->skb = skb;
4683 first->bytecount = skb->len;
4684 first->gso_segs = 1;
4685
Matthew Vickb66e2392012-12-13 07:20:33 +00004686 skb_tx_timestamp(skb);
4687
Alexander Duyckb646c222013-02-07 08:55:46 +00004688 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4689 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Matthew Vick1f6e8172012-08-18 07:26:33 +00004690
Alexander Duyckb646c222013-02-07 08:55:46 +00004691 if (!(adapter->ptp_tx_skb)) {
4692 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4693 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4694
4695 adapter->ptp_tx_skb = skb_get(skb);
4696 adapter->ptp_tx_start = jiffies;
4697 if (adapter->hw.mac.type == e1000_82576)
4698 schedule_work(&adapter->ptp_tx_work);
4699 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004700 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004701
Jesse Grosseab6d182010-10-20 13:56:03 +00004702 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004703 tx_flags |= IGB_TX_FLAGS_VLAN;
4704 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4705 }
4706
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004707 /* record initial flags and protocol */
4708 first->tx_flags = tx_flags;
4709 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004710
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004711 tso = igb_tso(tx_ring, first, &hdr_len);
4712 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004713 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004714 else if (!tso)
4715 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004716
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004717 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004718
4719 /* Make sure there is space in the ring for the next send. */
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004720 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004721
Auke Kok9d5c8242008-01-24 02:22:38 -08004722 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004723
4724out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004725 igb_unmap_and_free_tx_resource(tx_ring, first);
4726
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004727 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004728}
4729
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004730static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4731 struct sk_buff *skb)
4732{
4733 unsigned int r_idx = skb->queue_mapping;
4734
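	/* wrap any queue index from the stack back into the range of Tx
	 * queues we actually allocated
	 */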
4735 if (r_idx >= adapter->num_tx_queues)
4736 r_idx = r_idx % adapter->num_tx_queues;
4737
4738 return adapter->tx_ring[r_idx];
4739}
4740
Alexander Duyckcd392f52011-08-26 07:43:59 +00004741static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4742 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004743{
4744 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004745
4746 if (test_bit(__IGB_DOWN, &adapter->state)) {
4747 dev_kfree_skb_any(skb);
4748 return NETDEV_TX_OK;
4749 }
4750
4751 if (skb->len <= 0) {
4752 dev_kfree_skb_any(skb);
4753 return NETDEV_TX_OK;
4754 }
4755
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004756	/* The minimum packet size with TCTL.PSP set is 17 bytes, so pad the skb
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004757 * in order to meet this minimum size requirement.
4758 */
Tushar Daveea5ceea2012-09-14 03:43:43 +00004759 if (unlikely(skb->len < 17)) {
4760 if (skb_pad(skb, 17 - skb->len))
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004761 return NETDEV_TX_OK;
4762 skb->len = 17;
Tushar Daveea5ceea2012-09-14 03:43:43 +00004763 skb_set_tail_pointer(skb, 17);
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004764 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004765
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004766 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004767}
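/* The 17-byte floor enforced above reflects the hardware padding rule noted
 * in the comment: with TCTL.PSP (pad short packets) set the MAC needs at
 * least 17 bytes, so shorter skbs are zero-padded in software by skb_pad()
 * before the tail pointer is moved.
 */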
4768
4769/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004770 * igb_tx_timeout - Respond to a Tx Hang
4771 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004772 **/
4773static void igb_tx_timeout(struct net_device *netdev)
4774{
4775 struct igb_adapter *adapter = netdev_priv(netdev);
4776 struct e1000_hw *hw = &adapter->hw;
4777
4778 /* Do the reset outside of interrupt context */
4779 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004780
Alexander Duyck06218a82011-08-26 07:46:55 +00004781 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00004782 hw->dev_spec._82575.global_device_reset = true;
4783
Auke Kok9d5c8242008-01-24 02:22:38 -08004784 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004785 wr32(E1000_EICS,
4786 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004787}
4788
4789static void igb_reset_task(struct work_struct *work)
4790{
4791 struct igb_adapter *adapter;
4792 adapter = container_of(work, struct igb_adapter, reset_task);
4793
Taku Izumic97ec422010-04-27 14:39:30 +00004794 igb_dump(adapter);
4795 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004796 igb_reinit_locked(adapter);
4797}
4798
4799/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004800 * igb_get_stats64 - Get System Network Statistics
4801 * @netdev: network interface device structure
4802 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004803 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004804static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004805 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004806{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004807 struct igb_adapter *adapter = netdev_priv(netdev);
4808
4809 spin_lock(&adapter->stats64_lock);
4810 igb_update_stats(adapter, &adapter->stats64);
4811 memcpy(stats, &adapter->stats64, sizeof(*stats));
4812 spin_unlock(&adapter->stats64_lock);
4813
4814 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004815}
4816
4817/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004818 * igb_change_mtu - Change the Maximum Transfer Unit
4819 * @netdev: network interface device structure
4820 * @new_mtu: new value for maximum frame size
Auke Kok9d5c8242008-01-24 02:22:38 -08004821 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004822 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08004823 **/
4824static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4825{
4826 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004827 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004828 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004829
Alexander Duyckc809d222009-10-27 23:52:13 +00004830 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004831 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004832 return -EINVAL;
4833 }
4834
Alexander Duyck153285f2011-08-26 07:43:32 +00004835#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004836 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004837 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004838 return -EINVAL;
4839 }
4840
Alexander Duyck2ccd9942013-07-16 00:20:34 +00004841 /* adjust max frame to be at least the size of a standard frame */
4842 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4843 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
4844
Auke Kok9d5c8242008-01-24 02:22:38 -08004845 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4846 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004847
Auke Kok9d5c8242008-01-24 02:22:38 -08004848 /* igb_down has a dependency on max_frame_size */
4849 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004850
Alexander Duyck4c844852009-10-27 15:52:07 +00004851 if (netif_running(netdev))
4852 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004853
Alexander Duyck090b1792009-10-27 23:51:55 +00004854 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004855 netdev->mtu, new_mtu);
4856 netdev->mtu = new_mtu;
4857
4858 if (netif_running(netdev))
4859 igb_up(adapter);
4860 else
4861 igb_reset(adapter);
4862
4863 clear_bit(__IGB_RESETTING, &adapter->state);
4864
4865 return 0;
4866}
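/* Worked example of the max_frame arithmetic above: max_frame = MTU +
 * ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4).  An MTU of 9216 gives
 * 9216 + 14 + 4 + 4 = 9238 = MAX_STD_JUMBO_FRAME_SIZE, which is why the
 * range check above is expressed against 9238 while the error message
 * reports "MTU > 9216 not supported".
 */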
4867
4868/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004869 * igb_update_stats - Update the board statistics counters
4870 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004871 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004872void igb_update_stats(struct igb_adapter *adapter,
4873 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004874{
4875 struct e1000_hw *hw = &adapter->hw;
4876 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004877 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004878 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004879 int i;
4880 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004881 unsigned int start;
4882 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004883
4884#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4885
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004886 /* Prevent stats update while adapter is being reset, or if the pci
Auke Kok9d5c8242008-01-24 02:22:38 -08004887 * connection is down.
4888 */
4889 if (adapter->link_speed == 0)
4890 return;
4891 if (pci_channel_offline(pdev))
4892 return;
4893
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004894 bytes = 0;
4895 packets = 0;
Akeem G Abodunrin7f901282013-06-27 09:10:23 +00004896
4897 rcu_read_lock();
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004898 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyckae1c07a2012-08-08 05:23:22 +00004899 u32 rqdpc = rd32(E1000_RQDPC(i));
Alexander Duyck3025a442010-02-17 01:02:39 +00004900 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004901
Alexander Duyckae1c07a2012-08-08 05:23:22 +00004902 if (rqdpc) {
4903 ring->rx_stats.drops += rqdpc;
4904 net_stats->rx_fifo_errors += rqdpc;
4905 }
Eric Dumazet12dcd862010-10-15 17:27:10 +00004906
4907 do {
4908 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4909 _bytes = ring->rx_stats.bytes;
4910 _packets = ring->rx_stats.packets;
4911 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4912 bytes += _bytes;
4913 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004914 }
4915
Alexander Duyck128e45e2009-11-12 18:37:38 +00004916 net_stats->rx_bytes = bytes;
4917 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004918
4919 bytes = 0;
4920 packets = 0;
4921 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004922 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004923 do {
4924 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4925 _bytes = ring->tx_stats.bytes;
4926 _packets = ring->tx_stats.packets;
4927 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4928 bytes += _bytes;
4929 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004930 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004931 net_stats->tx_bytes = bytes;
4932 net_stats->tx_packets = packets;
Akeem G Abodunrin7f901282013-06-27 09:10:23 +00004933 rcu_read_unlock();
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004934
4935 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004936 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4937 adapter->stats.gprc += rd32(E1000_GPRC);
4938 adapter->stats.gorc += rd32(E1000_GORCL);
4939 rd32(E1000_GORCH); /* clear GORCL */
4940 adapter->stats.bprc += rd32(E1000_BPRC);
4941 adapter->stats.mprc += rd32(E1000_MPRC);
4942 adapter->stats.roc += rd32(E1000_ROC);
4943
4944 adapter->stats.prc64 += rd32(E1000_PRC64);
4945 adapter->stats.prc127 += rd32(E1000_PRC127);
4946 adapter->stats.prc255 += rd32(E1000_PRC255);
4947 adapter->stats.prc511 += rd32(E1000_PRC511);
4948 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4949 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4950 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4951 adapter->stats.sec += rd32(E1000_SEC);
4952
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004953 mpc = rd32(E1000_MPC);
4954 adapter->stats.mpc += mpc;
4955 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004956 adapter->stats.scc += rd32(E1000_SCC);
4957 adapter->stats.ecol += rd32(E1000_ECOL);
4958 adapter->stats.mcc += rd32(E1000_MCC);
4959 adapter->stats.latecol += rd32(E1000_LATECOL);
4960 adapter->stats.dc += rd32(E1000_DC);
4961 adapter->stats.rlec += rd32(E1000_RLEC);
4962 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4963 adapter->stats.xontxc += rd32(E1000_XONTXC);
4964 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4965 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4966 adapter->stats.fcruc += rd32(E1000_FCRUC);
4967 adapter->stats.gptc += rd32(E1000_GPTC);
4968 adapter->stats.gotc += rd32(E1000_GOTCL);
4969 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004970 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004971 adapter->stats.ruc += rd32(E1000_RUC);
4972 adapter->stats.rfc += rd32(E1000_RFC);
4973 adapter->stats.rjc += rd32(E1000_RJC);
4974 adapter->stats.tor += rd32(E1000_TORH);
4975 adapter->stats.tot += rd32(E1000_TOTH);
4976 adapter->stats.tpr += rd32(E1000_TPR);
4977
4978 adapter->stats.ptc64 += rd32(E1000_PTC64);
4979 adapter->stats.ptc127 += rd32(E1000_PTC127);
4980 adapter->stats.ptc255 += rd32(E1000_PTC255);
4981 adapter->stats.ptc511 += rd32(E1000_PTC511);
4982 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4983 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4984
4985 adapter->stats.mptc += rd32(E1000_MPTC);
4986 adapter->stats.bptc += rd32(E1000_BPTC);
4987
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004988 adapter->stats.tpt += rd32(E1000_TPT);
4989 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004990
4991 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004992 /* read internal phy specific stats */
4993 reg = rd32(E1000_CTRL_EXT);
4994 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4995 adapter->stats.rxerrc += rd32(E1000_RXERRC);
Carolyn Wyborny3dbdf962012-09-12 04:36:24 +00004996
4997 /* this stat has invalid values on i210/i211 */
4998 if ((hw->mac.type != e1000_i210) &&
4999 (hw->mac.type != e1000_i211))
5000 adapter->stats.tncrs += rd32(E1000_TNCRS);
Nick Nunley43915c7c2010-02-17 01:03:58 +00005001 }
5002
Auke Kok9d5c8242008-01-24 02:22:38 -08005003 adapter->stats.tsctc += rd32(E1000_TSCTC);
5004 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
5005
5006 adapter->stats.iac += rd32(E1000_IAC);
5007 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
5008 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
5009 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
5010 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
5011 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
5012 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
5013 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
5014 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
5015
5016 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00005017 net_stats->multicast = adapter->stats.mprc;
5018 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005019
5020 /* Rx Errors */
5021
5022 /* RLEC on some newer hardware can be incorrect so build
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005023 * our own version based on RUC and ROC
5024 */
Alexander Duyck128e45e2009-11-12 18:37:38 +00005025 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08005026 adapter->stats.crcerrs + adapter->stats.algnerrc +
5027 adapter->stats.ruc + adapter->stats.roc +
5028 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00005029 net_stats->rx_length_errors = adapter->stats.ruc +
5030 adapter->stats.roc;
5031 net_stats->rx_crc_errors = adapter->stats.crcerrs;
5032 net_stats->rx_frame_errors = adapter->stats.algnerrc;
5033 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005034
5035 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00005036 net_stats->tx_errors = adapter->stats.ecol +
5037 adapter->stats.latecol;
5038 net_stats->tx_aborted_errors = adapter->stats.ecol;
5039 net_stats->tx_window_errors = adapter->stats.latecol;
5040 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08005041
5042 /* Tx Dropped needs to be maintained elsewhere */
5043
5044 /* Phy Stats */
5045 if (hw->phy.media_type == e1000_media_type_copper) {
5046 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00005047 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005048 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
5049 adapter->phy_stats.idle_errors += phy_tmp;
5050 }
5051 }
5052
5053 /* Management Stats */
5054 adapter->stats.mgptc += rd32(E1000_MGTPTC);
5055 adapter->stats.mgprc += rd32(E1000_MGTPRC);
5056 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00005057
5058 /* OS2BMC Stats */
5059 reg = rd32(E1000_MANC);
5060 if (reg & E1000_MANC_EN_BMC2OS) {
5061 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
5062 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
5063 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
5064 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
5065 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005066}
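/* Explanatory note: most of the E1000_* statistics registers read above are
 * clear-on-read, which is why every value is accumulated into adapter->stats
 * rather than copied, and why the high half of the 64-bit octet counters
 * (GORCH/GOTCH) is read purely to clear the counter, as the inline comments
 * say.  The per-ring byte/packet totals are sampled under the
 * u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() sequence so a 32-bit
 * reader never observes a torn 64-bit value.
 */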
5067
Auke Kok9d5c8242008-01-24 02:22:38 -08005068static irqreturn_t igb_msix_other(int irq, void *data)
5069{
Alexander Duyck047e0032009-10-27 15:49:27 +00005070 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08005071 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07005072 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07005073 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00005074
Alexander Duyck7f081d42010-01-07 17:41:00 +00005075 if (icr & E1000_ICR_DRSTA)
5076 schedule_work(&adapter->reset_task);
5077
Alexander Duyck047e0032009-10-27 15:49:27 +00005078 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005079 /* HW is reporting DMA is out of sync */
5080 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00005081		/* The DMA Out of Sync is also an indication of a spoof event
5082 * in IOV mode. Check the Wrong VM Behavior register to
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005083 * see if it is really a spoof event.
5084 */
Greg Rose13800462010-11-06 02:08:26 +00005085 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00005086 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00005087
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005088 /* Check for a mailbox event */
5089 if (icr & E1000_ICR_VMMB)
5090 igb_msg_task(adapter);
5091
5092 if (icr & E1000_ICR_LSC) {
5093 hw->mac.get_link_status = 1;
5094 /* guard against interrupt when we're going down */
5095 if (!test_bit(__IGB_DOWN, &adapter->state))
5096 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5097 }
5098
Matthew Vick1f6e8172012-08-18 07:26:33 +00005099 if (icr & E1000_ICR_TS) {
5100 u32 tsicr = rd32(E1000_TSICR);
5101
5102 if (tsicr & E1000_TSICR_TXTS) {
5103 /* acknowledge the interrupt */
5104 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5105 /* retrieve hardware timestamp */
5106 schedule_work(&adapter->ptp_tx_work);
5107 }
5108 }
Matthew Vick1f6e8172012-08-18 07:26:33 +00005109
PJ Waskiewicz844290e2008-06-27 11:00:39 -07005110 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08005111
5112 return IRQ_HANDLED;
5113}
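/* In MSI-X mode the "other" vector handled above carries everything that is
 * not ring traffic: device reset assertion (DRSTA), DMA out-of-sync/possible
 * spoof events (DOUTSYNC), VF mailbox activity (VMMB), link state changes
 * (LSC) and PTP Tx timestamp interrupts (TS).  Ring interrupts are serviced
 * by igb_msix_ring() below.
 */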
5114
Alexander Duyck047e0032009-10-27 15:49:27 +00005115static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005116{
Alexander Duyck26b39272010-02-17 01:00:41 +00005117 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00005118 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08005119
Alexander Duyck047e0032009-10-27 15:49:27 +00005120 if (!q_vector->set_itr)
5121 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00005122
Alexander Duyck047e0032009-10-27 15:49:27 +00005123 if (!itr_val)
5124 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005125
Alexander Duyck26b39272010-02-17 01:00:41 +00005126 if (adapter->hw.mac.type == e1000_82575)
5127 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005128 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00005129 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00005130
5131 writel(itr_val, q_vector->itr_register);
5132 q_vector->set_itr = 0;
5133}
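/* The EITR write above differs by MAC type: 82575 expects the interval in
 * both halves of the register (hence itr_val << 16), while later parts take
 * the interval in the low bits together with E1000_EITR_CNT_IGNR so the
 * internal down-counter is ignored when the new value is loaded.
 */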
5134
5135static irqreturn_t igb_msix_ring(int irq, void *data)
5136{
5137 struct igb_q_vector *q_vector = data;
5138
5139 /* Write the ITR value calculated from the previous interrupt. */
5140 igb_write_itr(q_vector);
5141
5142 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005143
Auke Kok9d5c8242008-01-24 02:22:38 -08005144 return IRQ_HANDLED;
5145}
5146
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005147#ifdef CONFIG_IGB_DCA
Alexander Duyck6a050042012-09-25 00:31:27 +00005148static void igb_update_tx_dca(struct igb_adapter *adapter,
5149 struct igb_ring *tx_ring,
5150 int cpu)
5151{
5152 struct e1000_hw *hw = &adapter->hw;
5153 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
5154
5155 if (hw->mac.type != e1000_82575)
5156 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5157
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005158 /* We can enable relaxed ordering for reads, but not writes when
Alexander Duyck6a050042012-09-25 00:31:27 +00005159 * DCA is enabled. This is due to a known issue in some chipsets
5160 * which will cause the DCA tag to be cleared.
5161 */
5162 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
5163 E1000_DCA_TXCTRL_DATA_RRO_EN |
5164 E1000_DCA_TXCTRL_DESC_DCA_EN;
5165
5166 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
5167}
5168
5169static void igb_update_rx_dca(struct igb_adapter *adapter,
5170 struct igb_ring *rx_ring,
5171 int cpu)
5172{
5173 struct e1000_hw *hw = &adapter->hw;
5174 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
5175
5176 if (hw->mac.type != e1000_82575)
5177 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5178
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005179 /* We can enable relaxed ordering for reads, but not writes when
Alexander Duyck6a050042012-09-25 00:31:27 +00005180 * DCA is enabled. This is due to a known issue in some chipsets
5181 * which will cause the DCA tag to be cleared.
5182 */
5183 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
5184 E1000_DCA_RXCTRL_DESC_DCA_EN;
5185
5186 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
5187}
5188
Alexander Duyck047e0032009-10-27 15:49:27 +00005189static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005190{
Alexander Duyck047e0032009-10-27 15:49:27 +00005191 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005192 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005193
Alexander Duyck047e0032009-10-27 15:49:27 +00005194 if (q_vector->cpu == cpu)
5195 goto out_no_update;
5196
Alexander Duyck6a050042012-09-25 00:31:27 +00005197 if (q_vector->tx.ring)
5198 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
5199
5200 if (q_vector->rx.ring)
5201 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
5202
Alexander Duyck047e0032009-10-27 15:49:27 +00005203 q_vector->cpu = cpu;
5204out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005205 put_cpu();
5206}
5207
5208static void igb_setup_dca(struct igb_adapter *adapter)
5209{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00005210 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005211 int i;
5212
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005213 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005214 return;
5215
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00005216 /* Always use CB2 mode, difference is masked in the CB driver. */
5217 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
5218
Alexander Duyck047e0032009-10-27 15:49:27 +00005219 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00005220 adapter->q_vector[i]->cpu = -1;
5221 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005222 }
5223}
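/* DCA (direct cache access) lets the chipset steer descriptor DMA writes
 * toward the cache of the CPU that will service the queue.  The tag written
 * to E1000_DCA_TXCTRL/E1000_DCA_RXCTRL above comes from dca3_get_tag() for
 * the current CPU and is refreshed by igb_update_dca() whenever a q_vector
 * finds itself running on a different CPU.
 */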
5224
5225static int __igb_notify_dca(struct device *dev, void *data)
5226{
5227 struct net_device *netdev = dev_get_drvdata(dev);
5228 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00005229 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005230 struct e1000_hw *hw = &adapter->hw;
5231 unsigned long event = *(unsigned long *)data;
5232
5233 switch (event) {
5234 case DCA_PROVIDER_ADD:
5235 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005236 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005237 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005238 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08005239 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00005240 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005241 igb_setup_dca(adapter);
5242 break;
5243 }
5244 /* Fall Through since DCA is disabled. */
5245 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005246 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005247 /* without this a class_device is left
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005248 * hanging around in the sysfs model
5249 */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005250 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00005251 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005252 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08005253 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005254 }
5255 break;
5256 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08005257
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005258 return 0;
5259}
5260
5261static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005262 void *p)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005263{
5264 int ret_val;
5265
5266 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005267 __igb_notify_dca);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005268
5269 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5270}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005271#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08005272
Greg Rose0224d662011-10-14 02:57:14 +00005273#ifdef CONFIG_PCI_IOV
5274static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5275{
5276 unsigned char mac_addr[ETH_ALEN];
Greg Rose0224d662011-10-14 02:57:14 +00005277
Mitch A Williams5ac6f912013-01-18 08:57:20 +00005278 eth_zero_addr(mac_addr);
Greg Rose0224d662011-10-14 02:57:14 +00005279 igb_set_vf_mac(adapter, vf, mac_addr);
5280
Lior Levy70ea4782013-03-03 20:27:48 +00005281 /* By default spoof check is enabled for all VFs */
5282 adapter->vf_data[vf].spoofchk_enabled = true;
5283
Stefan Assmannf5571472012-08-18 04:06:11 +00005284 return 0;
Greg Rose0224d662011-10-14 02:57:14 +00005285}
5286
Greg Rose0224d662011-10-14 02:57:14 +00005287#endif
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005288static void igb_ping_all_vfs(struct igb_adapter *adapter)
5289{
5290 struct e1000_hw *hw = &adapter->hw;
5291 u32 ping;
5292 int i;
5293
5294 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5295 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005296 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005297 ping |= E1000_VT_MSGTYPE_CTS;
5298 igb_write_mbx(hw, &ping, 1, i);
5299 }
5300}
5301
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005302static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5303{
5304 struct e1000_hw *hw = &adapter->hw;
5305 u32 vmolr = rd32(E1000_VMOLR(vf));
5306 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5307
Alexander Duyckd85b90042010-09-22 17:56:20 +00005308 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005309 IGB_VF_FLAG_MULTI_PROMISC);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005310 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5311
5312 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5313 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00005314 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005315 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5316 } else {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005317 /* if we have hashes and we are clearing a multicast promisc
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005318 * flag we need to write the hashes to the MTA as this step
5319 * was previously skipped
5320 */
5321 if (vf_data->num_vf_mc_hashes > 30) {
5322 vmolr |= E1000_VMOLR_MPME;
5323 } else if (vf_data->num_vf_mc_hashes) {
5324 int j;
5325 vmolr |= E1000_VMOLR_ROMPE;
5326 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5327 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5328 }
5329 }
5330
5331 wr32(E1000_VMOLR(vf), vmolr);
5332
5333 /* there are flags left unprocessed, likely not supported */
5334 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5335 return -EINVAL;
5336
5337 return 0;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005338}
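/* VMOLR bits used above (per-VF offload register, as this driver uses them):
 * ROPE accepts unicast packets that hit the UTA hash, ROMPE accepts
 * multicast packets that hit the MTA hash, and MPME is full multicast
 * promiscuous mode.  A VF requesting multicast promiscuous gets MPME; when
 * the flag is cleared the VF falls back to ROMPE plus a rewrite of its
 * stored MTA hashes, or keeps MPME if it has more than 30 hash values.
 */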
5339
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005340static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5341 u32 *msgbuf, u32 vf)
5342{
5343 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5344 u16 *hash_list = (u16 *)&msgbuf[1];
5345 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5346 int i;
5347
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005348 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005349	 * to this VF for later use to restore when the PF multicast
5350 * list changes
5351 */
5352 vf_data->num_vf_mc_hashes = n;
5353
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005354 /* only up to 30 hash values supported */
5355 if (n > 30)
5356 n = 30;
5357
5358 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005359 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005360 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005361
5362 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005363 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005364
5365 return 0;
5366}
5367
5368static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5369{
5370 struct e1000_hw *hw = &adapter->hw;
5371 struct vf_data_storage *vf_data;
5372 int i, j;
5373
5374 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005375 u32 vmolr = rd32(E1000_VMOLR(i));
5376 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5377
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005378 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005379
5380 if ((vf_data->num_vf_mc_hashes > 30) ||
5381 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5382 vmolr |= E1000_VMOLR_MPME;
5383 } else if (vf_data->num_vf_mc_hashes) {
5384 vmolr |= E1000_VMOLR_ROMPE;
5385 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5386 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5387 }
5388 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005389 }
5390}
5391
5392static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5393{
5394 struct e1000_hw *hw = &adapter->hw;
5395 u32 pool_mask, reg, vid;
5396 int i;
5397
5398 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5399
5400 /* Find the vlan filter for this id */
5401 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5402 reg = rd32(E1000_VLVF(i));
5403
5404 /* remove the vf from the pool */
5405 reg &= ~pool_mask;
5406
5407 /* if pool is empty then remove entry from vfta */
5408 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5409 (reg & E1000_VLVF_VLANID_ENABLE)) {
5410			vid = reg & E1000_VLVF_VLANID_MASK;
5411			igb_vfta_set(hw, vid, false);
5412			reg = 0;
5413 }
5414
5415 wr32(E1000_VLVF(i), reg);
5416 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005417
5418 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005419}
5420
5421static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5422{
5423 struct e1000_hw *hw = &adapter->hw;
5424 u32 reg, i;
5425
Alexander Duyck51466232009-10-27 23:47:35 +00005426 /* The vlvf table only exists on 82576 hardware and newer */
5427 if (hw->mac.type < e1000_82576)
5428 return -1;
5429
5430 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005431 if (!adapter->vfs_allocated_count)
5432 return -1;
5433
5434 /* Find the vlan filter for this id */
5435 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5436 reg = rd32(E1000_VLVF(i));
5437 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5438 vid == (reg & E1000_VLVF_VLANID_MASK))
5439 break;
5440 }
5441
5442 if (add) {
5443 if (i == E1000_VLVF_ARRAY_SIZE) {
5444 /* Did not find a matching VLAN ID entry that was
5445 * enabled. Search for a free filter entry, i.e.
5446 * one without the enable bit set
5447 */
5448 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5449 reg = rd32(E1000_VLVF(i));
5450 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5451 break;
5452 }
5453 }
5454 if (i < E1000_VLVF_ARRAY_SIZE) {
5455 /* Found an enabled/available entry */
5456 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5457
5458 /* if !enabled we need to set this up in vfta */
5459 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005460 /* add VID to filter table */
5461 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005462 reg |= E1000_VLVF_VLANID_ENABLE;
5463 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005464 reg &= ~E1000_VLVF_VLANID_MASK;
5465 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005466 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005467
5468 /* do not modify RLPML for PF devices */
5469 if (vf >= adapter->vfs_allocated_count)
5470 return 0;
5471
5472 if (!adapter->vf_data[vf].vlans_enabled) {
5473 u32 size;
5474 reg = rd32(E1000_VMOLR(vf));
5475 size = reg & E1000_VMOLR_RLPML_MASK;
5476 size += 4;
5477 reg &= ~E1000_VMOLR_RLPML_MASK;
5478 reg |= size;
5479 wr32(E1000_VMOLR(vf), reg);
5480 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005481
Alexander Duyck51466232009-10-27 23:47:35 +00005482 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005483 }
5484 } else {
5485 if (i < E1000_VLVF_ARRAY_SIZE) {
5486 /* remove vf from the pool */
5487 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5488 /* if pool is empty then remove entry from vfta */
5489 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5490 reg = 0;
5491 igb_vfta_set(hw, vid, false);
5492 }
5493 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005494
5495 /* do not modify RLPML for PF devices */
5496 if (vf >= adapter->vfs_allocated_count)
5497 return 0;
5498
5499 adapter->vf_data[vf].vlans_enabled--;
5500 if (!adapter->vf_data[vf].vlans_enabled) {
5501 u32 size;
5502 reg = rd32(E1000_VMOLR(vf));
5503 size = reg & E1000_VMOLR_RLPML_MASK;
5504 size -= 4;
5505 reg &= ~E1000_VMOLR_RLPML_MASK;
5506 reg |= size;
5507 wr32(E1000_VMOLR(vf), reg);
5508 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005509 }
5510 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005511 return 0;
5512}
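/* Each VLVF entry manipulated above packs the VLAN id in its low 12 bits
 * (E1000_VLVF_VLANID_MASK), a pool-select bitmap starting at
 * E1000_VLVF_POOLSEL_SHIFT with one bit per VF/PF pool, and an enable bit.
 * The +4/-4 RLPML adjustment grows or shrinks the VF's maximum receive
 * packet length to make room for the VLAN tag when the VF's first VLAN is
 * added and gives it back when its last VLAN is removed.
 */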
5513
5514static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5515{
5516 struct e1000_hw *hw = &adapter->hw;
5517
5518 if (vid)
5519 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5520 else
5521 wr32(E1000_VMVIR(vf), 0);
5522}
5523
5524static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5525 int vf, u16 vlan, u8 qos)
5526{
5527 int err = 0;
5528 struct igb_adapter *adapter = netdev_priv(netdev);
5529
5530 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5531 return -EINVAL;
5532 if (vlan || qos) {
5533 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5534 if (err)
5535 goto out;
5536 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5537 igb_set_vmolr(adapter, vf, !vlan);
5538 adapter->vf_data[vf].pf_vlan = vlan;
5539 adapter->vf_data[vf].pf_qos = qos;
5540 dev_info(&adapter->pdev->dev,
5541 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5542 if (test_bit(__IGB_DOWN, &adapter->state)) {
5543 dev_warn(&adapter->pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005544 "The VF VLAN has been set, but the PF device is not up.\n");
Williams, Mitch A8151d292010-02-10 01:44:24 +00005545 dev_warn(&adapter->pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005546 "Bring the PF device up before attempting to use the VF device.\n");
Williams, Mitch A8151d292010-02-10 01:44:24 +00005547 }
5548 } else {
5549 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005550 false, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005551 igb_set_vmvir(adapter, vlan, vf);
5552 igb_set_vmolr(adapter, vf, true);
5553 adapter->vf_data[vf].pf_vlan = 0;
5554 adapter->vf_data[vf].pf_qos = 0;
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005555 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005556out:
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005557 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005558}
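/* igb_ndo_set_vf_vlan() is reached through the ndo_set_vf_vlan hook; a
 * typical trigger (illustrative iproute2 invocation, device name assumed) is
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *
 * which assigns port VLAN 100 with priority 3 to VF 0 and is rejected above
 * for vlan > 4095, qos > 7 or an out-of-range VF index.
 */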
5559
Greg Rose6f3dc3192013-03-26 06:19:41 +00005560static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
5561{
5562 struct e1000_hw *hw = &adapter->hw;
5563 int i;
5564 u32 reg;
5565
5566 /* Find the vlan filter for this id */
5567 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5568 reg = rd32(E1000_VLVF(i));
5569 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5570 vid == (reg & E1000_VLVF_VLANID_MASK))
5571 break;
5572 }
5573
5574 if (i >= E1000_VLVF_ARRAY_SIZE)
5575 i = -1;
5576
5577 return i;
5578}
5579
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005580static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5581{
Greg Rose6f3dc3192013-03-26 06:19:41 +00005582 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005583 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5584 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
Greg Rose6f3dc3192013-03-26 06:19:41 +00005585 int err = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005586
Greg Rose6f3dc3192013-03-26 06:19:41 +00005587 /* If in promiscuous mode we need to make sure the PF also has
5588 * the VLAN filter set.
5589 */
5590 if (add && (adapter->netdev->flags & IFF_PROMISC))
5591 err = igb_vlvf_set(adapter, vid, add,
5592 adapter->vfs_allocated_count);
5593 if (err)
5594 goto out;
5595
5596 err = igb_vlvf_set(adapter, vid, add, vf);
5597
5598 if (err)
5599 goto out;
5600
5601 /* Go through all the checks to see if the VLAN filter should
5602 * be wiped completely.
5603 */
5604 if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
5605 u32 vlvf, bits;
5606
5607 int regndx = igb_find_vlvf_entry(adapter, vid);
5608 if (regndx < 0)
5609 goto out;
5610 /* See if any other pools are set for this VLAN filter
5611 * entry other than the PF.
5612 */
5613 vlvf = bits = rd32(E1000_VLVF(regndx));
5614 bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
5615 adapter->vfs_allocated_count);
5616 /* If the filter was removed then ensure PF pool bit
5617 * is cleared if the PF only added itself to the pool
5618 * because the PF is in promiscuous mode.
5619 */
5620 if ((vlvf & VLAN_VID_MASK) == vid &&
5621 !test_bit(vid, adapter->active_vlans) &&
5622 !bits)
5623 igb_vlvf_set(adapter, vid, add,
5624 adapter->vfs_allocated_count);
5625 }
5626
5627out:
5628 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005629}
5630
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005631static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005632{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005633 /* clear flags - except flag that indicates PF has set the MAC */
5634 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005635 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005636
5637 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005638 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005639
5640 /* reset vlans for device */
5641 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005642 if (adapter->vf_data[vf].pf_vlan)
5643 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5644 adapter->vf_data[vf].pf_vlan,
5645 adapter->vf_data[vf].pf_qos);
5646 else
5647 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005648
5649 /* reset multicast table array for vf */
5650 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5651
5652 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005653 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005654}
5655
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005656static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5657{
5658 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5659
Mitch A Williams5ac6f912013-01-18 08:57:20 +00005660 /* clear mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005661 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
Mitch A Williams5ac6f912013-01-18 08:57:20 +00005662 eth_zero_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005663
5664 /* process remaining reset events */
5665 igb_vf_reset(adapter, vf);
5666}
5667
5668static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005669{
5670 struct e1000_hw *hw = &adapter->hw;
5671 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005672 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005673 u32 reg, msgbuf[3];
5674 u8 *addr = (u8 *)(&msgbuf[1]);
5675
5676 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005677 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005678
5679 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005680 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005681
5682 /* enable transmit and receive for vf */
5683 reg = rd32(E1000_VFTE);
5684 wr32(E1000_VFTE, reg | (1 << vf));
5685 reg = rd32(E1000_VFRE);
5686 wr32(E1000_VFRE, reg | (1 << vf));
5687
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005688 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005689
5690 /* reply to reset with ack and vf mac address */
5691 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5692 memcpy(addr, vf_mac, 6);
5693 igb_write_mbx(hw, msgbuf, 3, vf);
5694}
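/* Mailbox reset handshake implemented above: when a VF sends E1000_VF_RESET
 * the PF clears the VF's state, programs the VF MAC into a receive address
 * register, re-enables the VF's Tx/Rx pools in VFTE/VFRE, marks the VF clear
 * to send, and replies with E1000_VF_RESET | E1000_VT_MSGTYPE_ACK followed
 * by the six MAC address bytes in msgbuf[1..2].
 */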
5695
5696static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5697{
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005698 /* The VF MAC Address is stored in a packed array of bytes
Greg Rosede42edd2010-07-01 13:39:23 +00005699 * starting at the second 32 bit word of the msg array
5700 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005701 unsigned char *addr = (char *)&msg[1];
5702 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005703
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005704 if (is_valid_ether_addr(addr))
5705 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005706
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005707 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005708}
5709
5710static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5711{
5712 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005713 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005714 u32 msg = E1000_VT_MSGTYPE_NACK;
5715
5716 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005717 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5718 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005719 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005720 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005721 }
5722}
5723
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005724static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005725{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005726 struct pci_dev *pdev = adapter->pdev;
5727 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005728 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005729 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005730 s32 retval;
5731
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005732 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005733
Alexander Duyckfef45f42009-12-11 22:57:34 -08005734 if (retval) {
5735 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005736 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005737 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5738 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5739 return;
5740 goto out;
5741 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005742
5743 /* this is a message we already processed, do nothing */
5744 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005745 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005746
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005747 /* until the vf completes a reset it should not be
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005748 * allowed to start any configuration.
5749 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005750 if (msgbuf[0] == E1000_VF_RESET) {
5751 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005752 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005753 }
5754
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005755 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005756 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5757 return;
5758 retval = -1;
5759 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005760 }
5761
5762 switch ((msgbuf[0] & 0xFFFF)) {
5763 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005764 retval = -EINVAL;
5765 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5766 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5767 else
5768 dev_warn(&pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005769 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
5770 vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005771 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005772 case E1000_VF_SET_PROMISC:
5773 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5774 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005775 case E1000_VF_SET_MULTICAST:
5776 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5777 break;
5778 case E1000_VF_SET_LPE:
5779 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5780 break;
5781 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005782 retval = -1;
5783 if (vf_data->pf_vlan)
5784 dev_warn(&pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005785 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
5786 vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005787 else
5788 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005789 break;
5790 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005791 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005792 retval = -1;
5793 break;
5794 }
5795
Alexander Duyckfef45f42009-12-11 22:57:34 -08005796 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5797out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005798 /* notify the VF of the results of what it sent us */
5799 if (retval)
5800 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5801 else
5802 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5803
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005804 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005805}
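/* PF/VF mailbox message layout handled above: the low 16 bits of msgbuf[0]
 * select the request (SET_MAC_ADDR, SET_PROMISC, SET_MULTICAST, SET_LPE,
 * SET_VLAN, ...), E1000_VT_MSGINFO_MASK carries per-request arguments, and
 * the PF answers by OR-ing E1000_VT_MSGTYPE_ACK or _NACK into word 0 before
 * writing the mailbox back; E1000_VT_MSGTYPE_CTS is added on the normal path
 * to tell the VF it is clear to send further requests.
 */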
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005806
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005807static void igb_msg_task(struct igb_adapter *adapter)
5808{
5809 struct e1000_hw *hw = &adapter->hw;
5810 u32 vf;
5811
5812 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5813 /* process any reset requests */
5814 if (!igb_check_for_rst(hw, vf))
5815 igb_vf_reset_event(adapter, vf);
5816
5817 /* process any messages pending */
5818 if (!igb_check_for_msg(hw, vf))
5819 igb_rcv_msg_from_vf(adapter, vf);
5820
5821 /* process any acks */
5822 if (!igb_check_for_ack(hw, vf))
5823 igb_rcv_ack_from_vf(adapter, vf);
5824 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005825}
5826
Auke Kok9d5c8242008-01-24 02:22:38 -08005827/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005828 * igb_set_uta - Set unicast filter table address
5829 * @adapter: board private structure
5830 *
5831 * The unicast table address is a register array of 32-bit registers.
 5832 * The table is meant to be used in a way similar to how the MTA is used;
 5833 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005834 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5835 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005836 **/
5837static void igb_set_uta(struct igb_adapter *adapter)
5838{
5839 struct e1000_hw *hw = &adapter->hw;
5840 int i;
5841
5842 /* The UTA table only exists on 82576 hardware and newer */
5843 if (hw->mac.type < e1000_82576)
5844 return;
5845
5846 /* we only need to do this if VMDq is enabled */
5847 if (!adapter->vfs_allocated_count)
5848 return;
5849
5850 for (i = 0; i < hw->mac.uta_reg_count; i++)
5851 array_wr32(E1000_UTA, i, ~0);
5852}
5853
5854/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005855 * igb_intr_msi - Interrupt Handler
5856 * @irq: interrupt number
5857 * @data: pointer to a network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08005858 **/
5859static irqreturn_t igb_intr_msi(int irq, void *data)
5860{
Alexander Duyck047e0032009-10-27 15:49:27 +00005861 struct igb_adapter *adapter = data;
5862 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005863 struct e1000_hw *hw = &adapter->hw;
5864 /* read ICR disables interrupts using IAM */
5865 u32 icr = rd32(E1000_ICR);
5866
Alexander Duyck047e0032009-10-27 15:49:27 +00005867 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005868
Alexander Duyck7f081d42010-01-07 17:41:00 +00005869 if (icr & E1000_ICR_DRSTA)
5870 schedule_work(&adapter->reset_task);
5871
Alexander Duyck047e0032009-10-27 15:49:27 +00005872 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005873 /* HW is reporting DMA is out of sync */
5874 adapter->stats.doosync++;
5875 }
5876
Auke Kok9d5c8242008-01-24 02:22:38 -08005877 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5878 hw->mac.get_link_status = 1;
5879 if (!test_bit(__IGB_DOWN, &adapter->state))
5880 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5881 }
5882
Matthew Vick1f6e8172012-08-18 07:26:33 +00005883 if (icr & E1000_ICR_TS) {
5884 u32 tsicr = rd32(E1000_TSICR);
5885
5886 if (tsicr & E1000_TSICR_TXTS) {
5887 /* acknowledge the interrupt */
5888 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5889 /* retrieve hardware timestamp */
5890 schedule_work(&adapter->ptp_tx_work);
5891 }
5892 }
Matthew Vick1f6e8172012-08-18 07:26:33 +00005893
Alexander Duyck047e0032009-10-27 15:49:27 +00005894 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005895
5896 return IRQ_HANDLED;
5897}
5898
5899/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005900 * igb_intr - Legacy Interrupt Handler
5901 * @irq: interrupt number
5902 * @data: pointer to a network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08005903 **/
5904static irqreturn_t igb_intr(int irq, void *data)
5905{
Alexander Duyck047e0032009-10-27 15:49:27 +00005906 struct igb_adapter *adapter = data;
5907 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005908 struct e1000_hw *hw = &adapter->hw;
5909 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005910 * need for the IMC write
5911 */
Auke Kok9d5c8242008-01-24 02:22:38 -08005912 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005913
5914 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005915 * not set, then the adapter didn't send an interrupt
5916 */
Auke Kok9d5c8242008-01-24 02:22:38 -08005917 if (!(icr & E1000_ICR_INT_ASSERTED))
5918 return IRQ_NONE;
5919
Alexander Duyck0ba82992011-08-26 07:45:47 +00005920 igb_write_itr(q_vector);
5921
Alexander Duyck7f081d42010-01-07 17:41:00 +00005922 if (icr & E1000_ICR_DRSTA)
5923 schedule_work(&adapter->reset_task);
5924
Alexander Duyck047e0032009-10-27 15:49:27 +00005925 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005926 /* HW is reporting DMA is out of sync */
5927 adapter->stats.doosync++;
5928 }
5929
Auke Kok9d5c8242008-01-24 02:22:38 -08005930 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5931 hw->mac.get_link_status = 1;
5932 /* guard against interrupt when we're going down */
5933 if (!test_bit(__IGB_DOWN, &adapter->state))
5934 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5935 }
5936
Matthew Vick1f6e8172012-08-18 07:26:33 +00005937 if (icr & E1000_ICR_TS) {
5938 u32 tsicr = rd32(E1000_TSICR);
5939
5940 if (tsicr & E1000_TSICR_TXTS) {
5941 /* acknowledge the interrupt */
5942 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5943 /* retrieve hardware timestamp */
5944 schedule_work(&adapter->ptp_tx_work);
5945 }
5946 }
Matthew Vick1f6e8172012-08-18 07:26:33 +00005947
Alexander Duyck047e0032009-10-27 15:49:27 +00005948 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005949
5950 return IRQ_HANDLED;
5951}
5952
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00005953static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005954{
Alexander Duyck047e0032009-10-27 15:49:27 +00005955 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005956 struct e1000_hw *hw = &adapter->hw;
5957
Alexander Duyck0ba82992011-08-26 07:45:47 +00005958 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5959 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5960 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5961 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005962 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005963 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005964 }
5965
5966 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5967 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005968 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005969 else
5970 igb_irq_enable(adapter);
5971 }
5972}
5973
Auke Kok9d5c8242008-01-24 02:22:38 -08005974/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005975 * igb_poll - NAPI Rx polling callback
5976 * @napi: napi polling structure
5977 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005978 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005979static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005980{
Alexander Duyck047e0032009-10-27 15:49:27 +00005981 struct igb_q_vector *q_vector = container_of(napi,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005982 struct igb_q_vector,
5983 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005984 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005985
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005986#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005987 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5988 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005989#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005990 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005991 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005992
Alexander Duyck0ba82992011-08-26 07:45:47 +00005993 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005994 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005995
Alexander Duyck16eb8812011-08-26 07:43:54 +00005996 /* If all work not completed, return budget and keep polling */
5997 if (!clean_complete)
5998 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005999
Alexander Duyck46544252009-02-19 20:39:04 -08006000	/* less work was done than the budget allows; exit polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00006001 napi_complete(napi);
6002 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08006003
Alexander Duyck16eb8812011-08-26 07:43:54 +00006004 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006005}
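/* NAPI contract followed above: while either the Tx or Rx ring still has
 * work, igb_poll() returns the full budget so the core keeps polling with
 * interrupts left masked; once both are clean it calls napi_complete() and
 * igb_ring_irq_enable() re-arms the vector (EIMS in MSI-X mode, the
 * legacy/MSI mask otherwise).
 */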
Al Viro6d8126f2008-03-16 22:23:24 +00006006
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006007/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006008 * igb_clean_tx_irq - Reclaim resources after transmit completes
6009 * @q_vector: pointer to q_vector containing needed info
Ben Hutchings49ce9c22012-07-10 10:56:00 +00006010 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006011 * returns true if ring is completely cleaned
Auke Kok9d5c8242008-01-24 02:22:38 -08006012 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00006013static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08006014{
Alexander Duyck047e0032009-10-27 15:49:27 +00006015 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00006016 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00006017 struct igb_tx_buffer *tx_buffer;
Alexander Duyckf4128782012-09-13 06:28:01 +00006018 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08006019 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00006020 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00006021 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08006022
Alexander Duyck13fde972011-10-05 13:35:24 +00006023 if (test_bit(__IGB_DOWN, &adapter->state))
6024 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08006025
Alexander Duyck06034642011-08-26 07:44:22 +00006026 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00006027 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00006028 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006029
Alexander Duyckf4128782012-09-13 06:28:01 +00006030 do {
6031 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
Alexander Duyck8542db02011-08-26 07:44:43 +00006032
6033 /* if next_to_watch is not set then there is no work pending */
6034 if (!eop_desc)
6035 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00006036
Alexander Duyckf4128782012-09-13 06:28:01 +00006037 /* prevent any other reads prior to eop_desc */
Alexander Duyck70d289b2013-01-08 07:01:03 +00006038 read_barrier_depends();
Alexander Duyckf4128782012-09-13 06:28:01 +00006039
Alexander Duyck13fde972011-10-05 13:35:24 +00006040 /* if DD is not set pending work has not been completed */
6041 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
6042 break;
6043
Alexander Duyck8542db02011-08-26 07:44:43 +00006044 /* clear next_to_watch to prevent false hangs */
6045 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00006046
Alexander Duyckebe42d12011-08-26 07:45:09 +00006047 /* update the statistics for this packet */
6048 total_bytes += tx_buffer->bytecount;
6049 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00006050
Alexander Duyckebe42d12011-08-26 07:45:09 +00006051 /* free the skb */
6052 dev_kfree_skb_any(tx_buffer->skb);
Alexander Duyckebe42d12011-08-26 07:45:09 +00006053
6054 /* unmap skb header data */
6055 dma_unmap_single(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00006056 dma_unmap_addr(tx_buffer, dma),
6057 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00006058 DMA_TO_DEVICE);
6059
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00006060 /* clear tx_buffer data */
6061 tx_buffer->skb = NULL;
6062 dma_unmap_len_set(tx_buffer, len, 0);
6063
Alexander Duyckebe42d12011-08-26 07:45:09 +00006064 /* clear last DMA location and unmap remaining buffers */
6065 while (tx_desc != eop_desc) {
Alexander Duyck13fde972011-10-05 13:35:24 +00006066 tx_buffer++;
6067 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006068 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00006069 if (unlikely(!i)) {
6070 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00006071 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00006072 tx_desc = IGB_TX_DESC(tx_ring, 0);
6073 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00006074
6075 /* unmap any remaining paged data */
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00006076 if (dma_unmap_len(tx_buffer, len)) {
Alexander Duyckebe42d12011-08-26 07:45:09 +00006077 dma_unmap_page(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00006078 dma_unmap_addr(tx_buffer, dma),
6079 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00006080 DMA_TO_DEVICE);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00006081 dma_unmap_len_set(tx_buffer, len, 0);
Alexander Duyckebe42d12011-08-26 07:45:09 +00006082 }
6083 }
6084
Alexander Duyckebe42d12011-08-26 07:45:09 +00006085 /* move us one more past the eop_desc for start of next pkt */
6086 tx_buffer++;
6087 tx_desc++;
6088 i++;
6089 if (unlikely(!i)) {
6090 i -= tx_ring->count;
6091 tx_buffer = tx_ring->tx_buffer_info;
6092 tx_desc = IGB_TX_DESC(tx_ring, 0);
6093 }
Alexander Duyckf4128782012-09-13 06:28:01 +00006094
6095 /* issue prefetch for next Tx descriptor */
6096 prefetch(tx_desc);
6097
6098 /* update budget accounting */
6099 budget--;
6100 } while (likely(budget));
Alexander Duyck0e014cb2008-12-26 01:33:18 -08006101
Eric Dumazetbdbc0632012-01-04 20:23:36 +00006102 netdev_tx_completed_queue(txring_txq(tx_ring),
6103 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00006104 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006105 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00006106 u64_stats_update_begin(&tx_ring->tx_syncp);
6107 tx_ring->tx_stats.bytes += total_bytes;
6108 tx_ring->tx_stats.packets += total_packets;
6109 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006110 q_vector->tx.total_bytes += total_bytes;
6111 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08006112
Alexander Duyck6d095fa2011-08-26 07:46:19 +00006113 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00006114 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00006115
Auke Kok9d5c8242008-01-24 02:22:38 -08006116		/* Detect a transmit hang in hardware; this serializes the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006117 * check with the clearing of time_stamp and movement of i
6118 */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00006119 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckf4128782012-09-13 06:28:01 +00006120 if (tx_buffer->next_to_watch &&
Alexander Duyck8542db02011-08-26 07:44:43 +00006121 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00006122 (adapter->tx_timeout_factor * HZ)) &&
6123 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006124
Auke Kok9d5c8242008-01-24 02:22:38 -08006125 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00006126 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08006127 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07006128 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08006129 " TDH <%x>\n"
6130 " TDT <%x>\n"
6131 " next_to_use <%x>\n"
6132 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08006133 "buffer_info[next_to_clean]\n"
6134 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00006135 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08006136 " jiffies <%lx>\n"
6137 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07006138 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00006139 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00006140 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08006141 tx_ring->next_to_use,
6142 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00006143 tx_buffer->time_stamp,
Alexander Duyckf4128782012-09-13 06:28:01 +00006144 tx_buffer->next_to_watch,
Auke Kok9d5c8242008-01-24 02:22:38 -08006145 jiffies,
Alexander Duyckf4128782012-09-13 06:28:01 +00006146 tx_buffer->next_to_watch->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00006147 netif_stop_subqueue(tx_ring->netdev,
6148 tx_ring->queue_index);
6149
6150 /* we are about to reset, no point in enabling stuff */
6151 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08006152 }
6153 }
Alexander Duyck13fde972011-10-05 13:35:24 +00006154
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00006155#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
Alexander Duyck13fde972011-10-05 13:35:24 +00006156 if (unlikely(total_packets &&
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006157 netif_carrier_ok(tx_ring->netdev) &&
6158 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00006159 /* Make sure that anybody stopping the queue after this
6160 * sees the new next_to_clean.
6161 */
6162 smp_mb();
6163 if (__netif_subqueue_stopped(tx_ring->netdev,
6164 tx_ring->queue_index) &&
6165 !(test_bit(__IGB_DOWN, &adapter->state))) {
6166 netif_wake_subqueue(tx_ring->netdev,
6167 tx_ring->queue_index);
6168
6169 u64_stats_update_begin(&tx_ring->tx_syncp);
6170 tx_ring->tx_stats.restart_queue++;
6171 u64_stats_update_end(&tx_ring->tx_syncp);
6172 }
6173 }
6174
6175 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006176}
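/* Illustrative note (not part of the original driver): igb_clean_tx_irq()
 * biases the ring index negative ("i -= tx_ring->count") so the wrap test is
 * simply "if (unlikely(!i))" rather than a compare against the ring size.
 * For example, with count = 512 and next_to_clean = 510, i starts at -2;
 * after two descriptors it reaches 0, the buffer/descriptor pointers are
 * reset to the start of the ring and i is rebased to -512.  The final
 * "i += tx_ring->count" converts it back into a real ring index before it
 * is stored in next_to_clean.
 */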
6177
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006178/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006179 * igb_reuse_rx_page - page flip buffer and store it back on the ring
6180 * @rx_ring: rx descriptor ring to store buffers on
6181 * @old_buff: donor buffer to have page reused
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006182 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006183 * Synchronizes page for reuse by the adapter
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006184 **/
6185static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6186 struct igb_rx_buffer *old_buff)
6187{
6188 struct igb_rx_buffer *new_buff;
6189 u16 nta = rx_ring->next_to_alloc;
6190
6191 new_buff = &rx_ring->rx_buffer_info[nta];
6192
6193 /* update, and store next to alloc */
6194 nta++;
6195 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6196
6197 /* transfer page from old buffer to new buffer */
6198 memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
6199
6200 /* sync the buffer for use by the device */
6201 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
6202 old_buff->page_offset,
Alexander Duyckde78d1f2012-09-25 00:31:12 +00006203 IGB_RX_BUFSZ,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006204 DMA_FROM_DEVICE);
6205}
6206
Alexander Duyck74e238e2013-02-02 05:07:11 +00006207static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6208 struct page *page,
6209 unsigned int truesize)
6210{
6211 /* avoid re-using remote pages */
6212 if (unlikely(page_to_nid(page) != numa_node_id()))
6213 return false;
6214
6215#if (PAGE_SIZE < 8192)
6216 /* if we are only owner of page we can reuse it */
6217 if (unlikely(page_count(page) != 1))
6218 return false;
6219
6220 /* flip page offset to other buffer */
6221 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6222
6223 /* since we are the only owner of the page and we need to
6224 * increment it, just set the value to 2 in order to avoid
6225 * an unnecessary locked operation
6226 */
6227 atomic_set(&page->_count, 2);
6228#else
6229 /* move offset up to the next cache line */
6230 rx_buffer->page_offset += truesize;
6231
6232 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6233 return false;
6234
6235 /* bump ref count on page before it is given to the stack */
6236 get_page(page);
6237#endif
6238
6239 return true;
6240}
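/* Illustrative note (not part of the original driver): on systems with 4K
 * pages each receive page is split into two IGB_RX_BUFSZ halves, and the XOR
 * in igb_can_reuse_rx_page() flips between them.  Assuming IGB_RX_BUFSZ is
 * 2048 (half a 4K page):
 *
 *	page_offset = 0    -> 0 ^ 2048    = 2048  (use the upper half next)
 *	page_offset = 2048 -> 2048 ^ 2048 = 0     (back to the lower half)
 *
 * so the driver alternates halves while the stack may still hold a reference
 * to the other half.  On larger-page systems the offset instead walks forward
 * by truesize until the page is exhausted.
 */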
6241
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006242/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006243 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6244 * @rx_ring: rx descriptor ring to transact packets on
6245 * @rx_buffer: buffer containing page to add
6246 * @rx_desc: descriptor containing length of buffer written by hardware
6247 * @skb: sk_buff to place the data into
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006248 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006249 * This function will add the data contained in rx_buffer->page to the skb.
6250 * This is done either through a direct copy if the data in the buffer is
6251 * less than the skb header size, otherwise it will just attach the page as
6252 * a frag to the skb.
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006253 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006254 * The function will then update the page offset if necessary and return
6255 * true if the buffer can be reused by the adapter.
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006256 **/
6257static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6258 struct igb_rx_buffer *rx_buffer,
6259 union e1000_adv_rx_desc *rx_desc,
6260 struct sk_buff *skb)
6261{
6262 struct page *page = rx_buffer->page;
6263 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyck74e238e2013-02-02 05:07:11 +00006264#if (PAGE_SIZE < 8192)
6265 unsigned int truesize = IGB_RX_BUFSZ;
6266#else
6267 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
6268#endif
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006269
6270 if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
6271 unsigned char *va = page_address(page) + rx_buffer->page_offset;
6272
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006273 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6274 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6275 va += IGB_TS_HDR_LEN;
6276 size -= IGB_TS_HDR_LEN;
6277 }
6278
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006279 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6280
6281 /* we can reuse buffer as-is, just make sure it is local */
6282 if (likely(page_to_nid(page) == numa_node_id()))
6283 return true;
6284
6285 /* this page cannot be reused so discard it */
6286 put_page(page);
6287 return false;
6288 }
6289
6290 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
Alexander Duyck74e238e2013-02-02 05:07:11 +00006291 rx_buffer->page_offset, size, truesize);
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006292
Alexander Duyck74e238e2013-02-02 05:07:11 +00006293 return igb_can_reuse_rx_page(rx_buffer, page, truesize);
6294}
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006295
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006296static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6297 union e1000_adv_rx_desc *rx_desc,
6298 struct sk_buff *skb)
6299{
6300 struct igb_rx_buffer *rx_buffer;
6301 struct page *page;
6302
6303 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6304
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006305 page = rx_buffer->page;
6306 prefetchw(page);
6307
6308 if (likely(!skb)) {
6309 void *page_addr = page_address(page) +
6310 rx_buffer->page_offset;
6311
6312 /* prefetch first cache line of first page */
6313 prefetch(page_addr);
6314#if L1_CACHE_BYTES < 128
6315 prefetch(page_addr + L1_CACHE_BYTES);
6316#endif
6317
6318 /* allocate a skb to store the frags */
6319 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6320 IGB_RX_HDR_LEN);
6321 if (unlikely(!skb)) {
6322 rx_ring->rx_stats.alloc_failed++;
6323 return NULL;
6324 }
6325
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006326 /* we will be copying header into skb->data in
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006327 * pskb_may_pull so it is in our interest to prefetch
6328 * it now to avoid a possible cache miss
6329 */
6330 prefetchw(skb->data);
6331 }
6332
6333 /* we are reusing so sync this buffer for CPU use */
6334 dma_sync_single_range_for_cpu(rx_ring->dev,
6335 rx_buffer->dma,
6336 rx_buffer->page_offset,
Alexander Duyckde78d1f2012-09-25 00:31:12 +00006337 IGB_RX_BUFSZ,
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006338 DMA_FROM_DEVICE);
6339
6340 /* pull page into skb */
6341 if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
6342 /* hand second half of page back to the ring */
6343 igb_reuse_rx_page(rx_ring, rx_buffer);
6344 } else {
6345 /* we are not reusing the buffer so unmap it */
6346 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
6347 PAGE_SIZE, DMA_FROM_DEVICE);
6348 }
6349
6350 /* clear contents of rx_buffer */
6351 rx_buffer->page = NULL;
6352
6353 return skb;
6354}
6355
Alexander Duyckcd392f52011-08-26 07:43:59 +00006356static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006357 union e1000_adv_rx_desc *rx_desc,
6358 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08006359{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07006360 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006361
Alexander Duyck294e7d72011-08-26 07:45:57 +00006362 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006363 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00006364 return;
6365
6366 /* Rx checksum disabled via ethtool */
6367 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08006368 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00006369
Auke Kok9d5c8242008-01-24 02:22:38 -08006370 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006371 if (igb_test_staterr(rx_desc,
6372 E1000_RXDEXT_STATERR_TCPE |
6373 E1000_RXDEXT_STATERR_IPE)) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006374		/* work around an errata where the TCPE (aka L4E) bit is set
Jesse Brandeburgb9473562009-04-27 22:36:13 +00006375		 * incorrectly on 64 byte (60 byte w/o crc) packets; don't count
 6376		 * those as checksum errors and let the stack verify the crc32c
6377 */
Alexander Duyck866cff02011-08-26 07:45:36 +00006378 if (!((skb->len == 60) &&
6379 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00006380 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00006381 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006382 u64_stats_update_end(&ring->rx_syncp);
6383 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006384 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08006385 return;
6386 }
6387 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006388 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
6389 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08006390 skb->ip_summed = CHECKSUM_UNNECESSARY;
6391
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006392 dev_dbg(ring->dev, "cksum success: bits %08X\n",
6393 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08006394}
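/* Illustrative note (not part of the original driver): setting skb->ip_summed
 * to CHECKSUM_UNNECESSARY tells the stack that hardware already validated the
 * L4 checksum, so e.g. the TCP receive path skips its software checksum pass;
 * leaving the CHECKSUM_NONE default (as on error, or when RXCSUM is disabled)
 * makes the stack verify the checksum itself.
 */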
6395
Alexander Duyck077887c2011-08-26 07:46:29 +00006396static inline void igb_rx_hash(struct igb_ring *ring,
6397 union e1000_adv_rx_desc *rx_desc,
6398 struct sk_buff *skb)
6399{
6400 if (ring->netdev->features & NETIF_F_RXHASH)
6401 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
6402}
6403
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006404/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006405 * igb_is_non_eop - process handling of non-EOP buffers
6406 * @rx_ring: Rx ring being processed
6407 * @rx_desc: Rx descriptor for current buffer
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006409 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006410 * This function updates next_to_clean and prefetches the next descriptor.
 6411 * If the current buffer carries the EOP bit it returns false; otherwise it
 6412 * returns true to indicate a non-EOP buffer, so the caller keeps the
 6413 * in-progress sk_buff and continues the frame with the next buffer.
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006414 **/
6415static bool igb_is_non_eop(struct igb_ring *rx_ring,
6416 union e1000_adv_rx_desc *rx_desc)
6417{
6418 u32 ntc = rx_ring->next_to_clean + 1;
6419
6420 /* fetch, update, and store next to clean */
6421 ntc = (ntc < rx_ring->count) ? ntc : 0;
6422 rx_ring->next_to_clean = ntc;
6423
6424 prefetch(IGB_RX_DESC(rx_ring, ntc));
6425
6426 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
6427 return false;
6428
6429 return true;
6430}
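/* Illustrative note (not part of the original driver): a frame larger than a
 * single buffer spans several descriptors and only the last one has the EOP
 * status bit set.  igb_clean_rx_irq() therefore keeps the partially built
 * sk_buff in rx_ring->skb and loops with "continue" while igb_is_non_eop()
 * returns true, e.g.:
 *
 *	desc 17: buffer full, EOP clear  -> keep skb, next_to_clean = 18
 *	desc 18: buffer full, EOP clear  -> keep skb, next_to_clean = 19
 *	desc 19: last bytes,  EOP set    -> frame complete, pass skb up
 */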
6431
6432/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006433 * igb_get_headlen - determine size of header for LRO/GRO
6434 * @data: pointer to the start of the headers
6435 * @max_len: total length of section to find headers in
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006436 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006437 * This function is meant to determine the length of headers that will
6438 * be recognized by hardware for LRO, and GRO offloads. The main
6439 * motivation of doing this is to only perform one pull for IPv4 TCP
6440 * packets so that we can do basic things like calculating the gso_size
6441 * based on the average data per packet.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006442 **/
6443static unsigned int igb_get_headlen(unsigned char *data,
6444 unsigned int max_len)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006445{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006446 union {
6447 unsigned char *network;
6448 /* l2 headers */
6449 struct ethhdr *eth;
6450 struct vlan_hdr *vlan;
6451 /* l3 headers */
6452 struct iphdr *ipv4;
6453 struct ipv6hdr *ipv6;
6454 } hdr;
6455 __be16 protocol;
6456 u8 nexthdr = 0; /* default to not TCP */
6457 u8 hlen;
6458
6459 /* this should never happen, but better safe than sorry */
6460 if (max_len < ETH_HLEN)
6461 return max_len;
6462
6463 /* initialize network frame pointer */
6464 hdr.network = data;
6465
6466 /* set first protocol and move network header forward */
6467 protocol = hdr.eth->h_proto;
6468 hdr.network += ETH_HLEN;
6469
6470 /* handle any vlan tag if present */
6471 if (protocol == __constant_htons(ETH_P_8021Q)) {
6472 if ((hdr.network - data) > (max_len - VLAN_HLEN))
6473 return max_len;
6474
6475 protocol = hdr.vlan->h_vlan_encapsulated_proto;
6476 hdr.network += VLAN_HLEN;
6477 }
6478
6479 /* handle L3 protocols */
6480 if (protocol == __constant_htons(ETH_P_IP)) {
6481 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6482 return max_len;
6483
6484 /* access ihl as a u8 to avoid unaligned access on ia64 */
6485 hlen = (hdr.network[0] & 0x0F) << 2;
6486
6487 /* verify hlen meets minimum size requirements */
6488 if (hlen < sizeof(struct iphdr))
6489 return hdr.network - data;
6490
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006491 /* record next protocol if header is present */
Alexander Duyckb9555f62013-02-01 08:56:47 +00006492 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006493 nexthdr = hdr.ipv4->protocol;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006494 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
6495 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6496 return max_len;
6497
6498 /* record next protocol */
6499 nexthdr = hdr.ipv6->nexthdr;
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006500 hlen = sizeof(struct ipv6hdr);
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006501 } else {
6502 return hdr.network - data;
6503 }
6504
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006505 /* relocate pointer to start of L4 header */
6506 hdr.network += hlen;
6507
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006508 /* finally sort out TCP */
6509 if (nexthdr == IPPROTO_TCP) {
6510 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
6511 return max_len;
6512
6513 /* access doff as a u8 to avoid unaligned access on ia64 */
6514 hlen = (hdr.network[12] & 0xF0) >> 2;
6515
6516 /* verify hlen meets minimum size requirements */
6517 if (hlen < sizeof(struct tcphdr))
6518 return hdr.network - data;
6519
6520 hdr.network += hlen;
6521 } else if (nexthdr == IPPROTO_UDP) {
6522 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
6523 return max_len;
6524
6525 hdr.network += sizeof(struct udphdr);
6526 }
6527
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006528 /* If everything has gone correctly hdr.network should be the
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006529 * data section of the packet and will be the end of the header.
6530 * If not then it probably represents the end of the last recognized
6531 * header.
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006532 */
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006533 if ((hdr.network - data) < max_len)
6534 return hdr.network - data;
6535 else
6536 return max_len;
6537}
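/* Illustrative note (not part of the original driver): for a plain TCP/IPv4
 * frame with no VLAN tag and no TCP options, igb_get_headlen() walks
 * 14 (Ethernet) + 20 (IPv4) + 20 (TCP) bytes and returns 54, so only the
 * headers end up copied into the linear part of the skb by igb_pull_tail()
 * while the payload stays in the page fragment.
 */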
6538
6539/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006540 * igb_pull_tail - igb specific version of skb_pull_tail
6541 * @rx_ring: rx descriptor ring packet is being transacted on
6542 * @rx_desc: pointer to the EOP Rx descriptor
6543 * @skb: pointer to current skb being adjusted
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006544 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006545 * This function is an igb specific version of __pskb_pull_tail. The
6546 * main difference between this version and the original function is that
6547 * this function can make several assumptions about the state of things
6548 * that allow for significant optimizations versus the standard function.
6549 * As a result we can do things like drop a frag and maintain an accurate
6550 * truesize for the skb.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006551 */
6552static void igb_pull_tail(struct igb_ring *rx_ring,
6553 union e1000_adv_rx_desc *rx_desc,
6554 struct sk_buff *skb)
6555{
6556 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
6557 unsigned char *va;
6558 unsigned int pull_len;
6559
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006560 /* it is valid to use page_address instead of kmap since we are
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006561	 * working with pages allocated out of the lowmem pool by
 6562	 * alloc_page(GFP_ATOMIC), which never returns highmem pages
6563 */
6564 va = skb_frag_address(frag);
6565
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006566 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6567 /* retrieve timestamp from buffer */
6568 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6569
6570 /* update pointers to remove timestamp header */
6571 skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
6572 frag->page_offset += IGB_TS_HDR_LEN;
6573 skb->data_len -= IGB_TS_HDR_LEN;
6574 skb->len -= IGB_TS_HDR_LEN;
6575
6576 /* move va to start of packet data */
6577 va += IGB_TS_HDR_LEN;
6578 }
6579
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006580 /* we need the header to contain the greater of either ETH_HLEN or
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006581 * 60 bytes if the skb->len is less than 60 for skb_pad.
6582 */
6583 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
6584
6585 /* align pull length to size of long to optimize memcpy performance */
6586 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
6587
6588 /* update all of the pointers */
6589 skb_frag_size_sub(frag, pull_len);
6590 frag->page_offset += pull_len;
6591 skb->data_len -= pull_len;
6592 skb->tail += pull_len;
6593}
6594
6595/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006596 * igb_cleanup_headers - Correct corrupted or empty headers
6597 * @rx_ring: rx descriptor ring packet is being transacted on
6598 * @rx_desc: pointer to the EOP Rx descriptor
6599 * @skb: pointer to current skb being fixed
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006600 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006601 * Address the case where we are pulling data in on pages only
6602 * and as such no data is present in the skb header.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006603 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006604 * In addition if skb is not at least 60 bytes we need to pad it so that
6605 * it is large enough to qualify as a valid Ethernet frame.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006606 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006607 * Returns true if an error was encountered and skb was freed.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006608 **/
6609static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6610 union e1000_adv_rx_desc *rx_desc,
6611 struct sk_buff *skb)
6612{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006613 if (unlikely((igb_test_staterr(rx_desc,
6614 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6615 struct net_device *netdev = rx_ring->netdev;
6616 if (!(netdev->features & NETIF_F_RXALL)) {
6617 dev_kfree_skb_any(skb);
6618 return true;
6619 }
6620 }
6621
6622 /* place header in linear portion of buffer */
6623 if (skb_is_nonlinear(skb))
6624 igb_pull_tail(rx_ring, rx_desc, skb);
6625
6626 /* if skb_pad returns an error the skb was freed */
6627 if (unlikely(skb->len < 60)) {
6628 int pad_len = 60 - skb->len;
6629
6630 if (skb_pad(skb, pad_len))
6631 return true;
6632 __skb_put(skb, pad_len);
6633 }
6634
6635 return false;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006636}
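/* Illustrative note (not part of the original driver): the 60 byte figure
 * used above is the minimum Ethernet frame size (64 bytes) minus the 4 byte
 * FCS that the MAC strips before handing the frame to the driver, so any
 * skb shorter than that is zero padded by skb_pad() before being passed up
 * the stack.
 */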
6637
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006638/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006639 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
6640 * @rx_ring: rx descriptor ring packet is being transacted on
6641 * @rx_desc: pointer to the EOP Rx descriptor
6642 * @skb: pointer to current skb being populated
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006643 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006644 * This function checks the ring, descriptor, and packet information in
6645 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6646 * other fields within the skb.
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006647 **/
6648static void igb_process_skb_fields(struct igb_ring *rx_ring,
6649 union e1000_adv_rx_desc *rx_desc,
6650 struct sk_buff *skb)
6651{
6652 struct net_device *dev = rx_ring->netdev;
6653
6654 igb_rx_hash(rx_ring, rx_desc, skb);
6655
6656 igb_rx_checksum(rx_ring, rx_desc, skb);
6657
Matthew Vick20a48412013-04-24 07:42:06 +00006658 igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006659
Patrick McHardyf6469682013-04-19 02:04:27 +00006660 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006661 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6662 u16 vid;
6663 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6664 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6665 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6666 else
6667 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6668
Patrick McHardy86a9bad2013-04-19 02:04:30 +00006669 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006670 }
6671
6672 skb_record_rx_queue(skb, rx_ring->queue_index);
6673
6674 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6675}
6676
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006677static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08006678{
Alexander Duyck0ba82992011-08-26 07:45:47 +00006679 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006680 struct sk_buff *skb = rx_ring->skb;
Auke Kok9d5c8242008-01-24 02:22:38 -08006681 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006682 u16 cleaned_count = igb_desc_unused(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08006683
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006684 do {
6685 union e1000_adv_rx_desc *rx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08006686
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006687 /* return some buffers to hardware, one at a time is too slow */
6688 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
6689 igb_alloc_rx_buffers(rx_ring, cleaned_count);
6690 cleaned_count = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006691 }
6692
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006693 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006694
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006695 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
6696 break;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006697
Alexander Duyck74e238e2013-02-02 05:07:11 +00006698 /* This memory barrier is needed to keep us from reading
6699 * any other fields out of the rx_desc until we know the
6700 * RXD_STAT_DD bit is set
6701 */
6702 rmb();
6703
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006704 /* retrieve a buffer from the ring */
Alexander Duyckf9d40f62013-04-17 20:41:04 +00006705 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
Alexander Duyck16eb8812011-08-26 07:43:54 +00006706
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006707 /* exit if we failed to retrieve a buffer */
6708 if (!skb)
6709 break;
6710
6711 cleaned_count++;
6712
6713 /* fetch next buffer in frame if non-eop */
6714 if (igb_is_non_eop(rx_ring, rx_desc))
6715 continue;
Alexander Duyck44390ca2011-08-26 07:43:38 +00006716
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006717 /* verify the packet layout is correct */
6718 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
6719 skb = NULL;
6720 continue;
Auke Kok9d5c8242008-01-24 02:22:38 -08006721 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006722
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006723 /* probably a little skewed due to removing CRC */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006724 total_bytes += skb->len;
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006725
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006726 /* populate checksum, timestamp, VLAN, and protocol */
6727 igb_process_skb_fields(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006728
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006729 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006730
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006731 /* reset skb pointer */
6732 skb = NULL;
6733
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006734 /* update budget accounting */
6735 total_packets++;
6736 } while (likely(total_packets < budget));
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006737
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006738 /* place incomplete frames back on ring for completion */
6739 rx_ring->skb = skb;
6740
Eric Dumazet12dcd862010-10-15 17:27:10 +00006741 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006742 rx_ring->rx_stats.packets += total_packets;
6743 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006744 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006745 q_vector->rx.total_packets += total_packets;
6746 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006747
6748 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006749 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006750
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006751 return (total_packets < budget);
Auke Kok9d5c8242008-01-24 02:22:38 -08006752}
6753
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006754static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6755 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006756{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006757 struct page *page = bi->page;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006758 dma_addr_t dma;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006759
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006760 /* since we are recycling buffers we should seldom need to alloc */
6761 if (likely(page))
Alexander Duyckc023cd82011-08-26 07:43:43 +00006762 return true;
6763
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006764 /* alloc new page for storage */
6765 page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
6766 if (unlikely(!page)) {
6767 rx_ring->rx_stats.alloc_failed++;
6768 return false;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006769 }
6770
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006771 /* map page for use */
6772 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006773
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006774 /* if mapping failed free memory back to system since
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006775 * there isn't much point in holding memory we can't use
6776 */
Alexander Duyckc023cd82011-08-26 07:43:43 +00006777 if (dma_mapping_error(rx_ring->dev, dma)) {
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006778 __free_page(page);
6779
Alexander Duyckc023cd82011-08-26 07:43:43 +00006780 rx_ring->rx_stats.alloc_failed++;
6781 return false;
6782 }
6783
6784 bi->dma = dma;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006785 bi->page = page;
6786 bi->page_offset = 0;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006787
Alexander Duyckc023cd82011-08-26 07:43:43 +00006788 return true;
6789}
6790
Auke Kok9d5c8242008-01-24 02:22:38 -08006791/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006792 * igb_alloc_rx_buffers - Replace used receive buffers
 6793 * @rx_ring: rx descriptor ring to place new buffers on
 * @cleaned_count: number of buffers to allocate
Auke Kok9d5c8242008-01-24 02:22:38 -08006794 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006795void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006796{
Auke Kok9d5c8242008-01-24 02:22:38 -08006797 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006798 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006799 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006800
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006801 /* nothing to do */
6802 if (!cleaned_count)
6803 return;
6804
Alexander Duyck601369062011-08-26 07:44:05 +00006805 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006806 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006807 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006808
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006809 do {
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006810 if (!igb_alloc_mapped_page(rx_ring, bi))
Alexander Duyckc023cd82011-08-26 07:43:43 +00006811 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006812
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006813 /* Refresh the desc even if buffer_addrs didn't change
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006814 * because each write-back erases this info.
6815 */
Alexander Duyckf9d40f62013-04-17 20:41:04 +00006816 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
Auke Kok9d5c8242008-01-24 02:22:38 -08006817
Alexander Duyckc023cd82011-08-26 07:43:43 +00006818 rx_desc++;
6819 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006820 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006821 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006822 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006823 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006824 i -= rx_ring->count;
6825 }
6826
6827 /* clear the hdr_addr for the next_to_use descriptor */
6828 rx_desc->read.hdr_addr = 0;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006829
6830 cleaned_count--;
6831 } while (cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006832
Alexander Duyckc023cd82011-08-26 07:43:43 +00006833 i += rx_ring->count;
6834
Auke Kok9d5c8242008-01-24 02:22:38 -08006835 if (rx_ring->next_to_use != i) {
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006836 /* record the next descriptor to use */
Auke Kok9d5c8242008-01-24 02:22:38 -08006837 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006838
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006839 /* update next to alloc since we have filled the ring */
6840 rx_ring->next_to_alloc = i;
6841
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006842 /* Force memory writes to complete before letting h/w
Auke Kok9d5c8242008-01-24 02:22:38 -08006843 * know there are new descriptors to fetch. (Only
6844 * applicable for weak-ordered memory model archs,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006845 * such as IA-64).
6846 */
Auke Kok9d5c8242008-01-24 02:22:38 -08006847 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006848 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006849 }
6850}
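/* Illustrative note (not part of the original driver): callers batch refills
 * rather than reposting one descriptor at a time.  igb_clean_rx_irq() above
 * tracks how many descriptors it has consumed and calls
 *
 *	igb_alloc_rx_buffers(rx_ring, cleaned_count);
 *
 * once at least IGB_RX_BUFFER_WRITE descriptors are free, which amortizes the
 * tail register write (and the wmb() ordering it requires) over many buffers.
 */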
6851
6852/**
 6853 * igb_mii_ioctl - handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG)
 6854 * @netdev: network interface device structure
 6855 * @ifr: pointer to the interface request structure holding MII data
 6856 * @cmd: ioctl command to execute
6857 **/
6858static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6859{
6860 struct igb_adapter *adapter = netdev_priv(netdev);
6861 struct mii_ioctl_data *data = if_mii(ifr);
6862
6863 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6864 return -EOPNOTSUPP;
6865
6866 switch (cmd) {
6867 case SIOCGMIIPHY:
6868 data->phy_id = adapter->hw.phy.addr;
6869 break;
6870 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006871 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6872 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006873 return -EIO;
6874 break;
6875 case SIOCSMIIREG:
6876 default:
6877 return -EOPNOTSUPP;
6878 }
6879 return 0;
6880}
6881
6882/**
 6883 * igb_ioctl - dispatch device specific ioctl commands
 6884 * @netdev: network interface device structure
 6885 * @ifr: pointer to the interface request structure
 6886 * @cmd: ioctl command to execute
6887 **/
6888static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6889{
6890 switch (cmd) {
6891 case SIOCGMIIPHY:
6892 case SIOCGMIIREG:
6893 case SIOCSMIIREG:
6894 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006895 case SIOCSHWTSTAMP:
Matthew Vicka79f4f82012-08-10 05:40:44 +00006896 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006897 default:
6898 return -EOPNOTSUPP;
6899 }
6900}
6901
Alexander Duyck009bc062009-07-23 18:08:35 +00006902s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6903{
6904 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00006905
Jiang Liu23d028c2012-08-20 13:32:20 -06006906 if (pcie_capability_read_word(adapter->pdev, reg, value))
Alexander Duyck009bc062009-07-23 18:08:35 +00006907 return -E1000_ERR_CONFIG;
6908
Alexander Duyck009bc062009-07-23 18:08:35 +00006909 return 0;
6910}
6911
6912s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6913{
6914 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00006915
Jiang Liu23d028c2012-08-20 13:32:20 -06006916 if (pcie_capability_write_word(adapter->pdev, reg, *value))
Alexander Duyck009bc062009-07-23 18:08:35 +00006917 return -E1000_ERR_CONFIG;
6918
Alexander Duyck009bc062009-07-23 18:08:35 +00006919 return 0;
6920}
6921
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006922static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006923{
6924 struct igb_adapter *adapter = netdev_priv(netdev);
6925 struct e1000_hw *hw = &adapter->hw;
6926 u32 ctrl, rctl;
Patrick McHardyf6469682013-04-19 02:04:27 +00006927 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08006928
Alexander Duyck5faf0302011-08-26 07:46:08 +00006929 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006930 /* enable VLAN tag insert/strip */
6931 ctrl = rd32(E1000_CTRL);
6932 ctrl |= E1000_CTRL_VME;
6933 wr32(E1000_CTRL, ctrl);
6934
Alexander Duyck51466232009-10-27 23:47:35 +00006935 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006936 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006937 rctl &= ~E1000_RCTL_CFIEN;
6938 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006939 } else {
6940 /* disable VLAN tag insert/strip */
6941 ctrl = rd32(E1000_CTRL);
6942 ctrl &= ~E1000_CTRL_VME;
6943 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006944 }
6945
Alexander Duycke1739522009-02-19 20:39:44 -08006946 igb_rlpml_set(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006947}
6948
Patrick McHardy80d5c362013-04-19 02:04:28 +00006949static int igb_vlan_rx_add_vid(struct net_device *netdev,
6950 __be16 proto, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006951{
6952 struct igb_adapter *adapter = netdev_priv(netdev);
6953 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006954 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006955
Alexander Duyck51466232009-10-27 23:47:35 +00006956 /* attempt to add filter to vlvf array */
6957 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006958
Alexander Duyck51466232009-10-27 23:47:35 +00006959 /* add the filter since PF can receive vlans w/o entry in vlvf */
6960 igb_vfta_set(hw, vid, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006961
6962 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006963
6964 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006965}
6966
Patrick McHardy80d5c362013-04-19 02:04:28 +00006967static int igb_vlan_rx_kill_vid(struct net_device *netdev,
6968 __be16 proto, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006969{
6970 struct igb_adapter *adapter = netdev_priv(netdev);
6971 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006972 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006973 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006974
Alexander Duyck51466232009-10-27 23:47:35 +00006975 /* remove vlan from VLVF table array */
6976 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006977
Alexander Duyck51466232009-10-27 23:47:35 +00006978 /* if vid was not present in VLVF just remove it from table */
6979 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006980 igb_vfta_set(hw, vid, false);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006981
6982 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006983
6984 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006985}
6986
6987static void igb_restore_vlan(struct igb_adapter *adapter)
6988{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006989 u16 vid;
Auke Kok9d5c8242008-01-24 02:22:38 -08006990
Alexander Duyck5faf0302011-08-26 07:46:08 +00006991 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6992
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006993 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00006994 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08006995}
6996
David Decotigny14ad2512011-04-27 18:32:43 +00006997int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08006998{
Alexander Duyck090b1792009-10-27 23:51:55 +00006999 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08007000 struct e1000_mac_info *mac = &adapter->hw.mac;
7001
7002 mac->autoneg = 0;
7003
David Decotigny14ad2512011-04-27 18:32:43 +00007004 /* Make sure dplx is at most 1 bit and lsb of speed is not set
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007005 * for the switch() below to work
7006 */
David Decotigny14ad2512011-04-27 18:32:43 +00007007 if ((spd & 1) || (dplx & ~1))
7008 goto err_inval;
7009
Akeem G. Abodunrinf502ef72013-04-05 16:49:06 +00007010	/* Fiber NICs only allow 1000 Mbps full duplex
 7011	 * and 100 Mbps full duplex for 100BaseFX SFP modules
7012 */
7013 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
7014 switch (spd + dplx) {
7015 case SPEED_10 + DUPLEX_HALF:
7016 case SPEED_10 + DUPLEX_FULL:
7017 case SPEED_100 + DUPLEX_HALF:
7018 goto err_inval;
7019 default:
7020 break;
7021 }
7022 }
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00007023
David Decotigny14ad2512011-04-27 18:32:43 +00007024 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08007025 case SPEED_10 + DUPLEX_HALF:
7026 mac->forced_speed_duplex = ADVERTISE_10_HALF;
7027 break;
7028 case SPEED_10 + DUPLEX_FULL:
7029 mac->forced_speed_duplex = ADVERTISE_10_FULL;
7030 break;
7031 case SPEED_100 + DUPLEX_HALF:
7032 mac->forced_speed_duplex = ADVERTISE_100_HALF;
7033 break;
7034 case SPEED_100 + DUPLEX_FULL:
7035 mac->forced_speed_duplex = ADVERTISE_100_FULL;
7036 break;
7037 case SPEED_1000 + DUPLEX_FULL:
7038 mac->autoneg = 1;
7039 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
7040 break;
7041 case SPEED_1000 + DUPLEX_HALF: /* not supported */
7042 default:
David Decotigny14ad2512011-04-27 18:32:43 +00007043 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08007044 }
Jesse Brandeburg8376dad2012-07-26 02:31:19 +00007045
7046 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
7047 adapter->hw.phy.mdix = AUTO_ALL_MODES;
7048
Auke Kok9d5c8242008-01-24 02:22:38 -08007049 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00007050
7051err_inval:
7052 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
7053 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08007054}
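/* Illustrative note (not part of the original driver): the "spd + dplx"
 * switch above works because the SPEED_* constants are the link speeds in
 * Mbps (10, 100, 1000) while DUPLEX_HALF is 0 and DUPLEX_FULL is 1, so e.g.
 * SPEED_100 + DUPLEX_FULL == 101 uniquely identifies 100 Mbps full duplex.
 * That is also why the earlier sanity check rejects any speed with its low
 * bit set and any duplex value other than 0 or 1: the sum stays unambiguous.
 */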
7055
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007056static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7057 bool runtime)
Auke Kok9d5c8242008-01-24 02:22:38 -08007058{
7059 struct net_device *netdev = pci_get_drvdata(pdev);
7060 struct igb_adapter *adapter = netdev_priv(netdev);
7061 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07007062 u32 ctrl, rctl, status;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007063 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
Auke Kok9d5c8242008-01-24 02:22:38 -08007064#ifdef CONFIG_PM
7065 int retval = 0;
7066#endif
7067
7068 netif_device_detach(netdev);
7069
Alexander Duycka88f10e2008-07-08 15:13:38 -07007070 if (netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007071 __igb_close(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07007072
Alexander Duyck047e0032009-10-27 15:49:27 +00007073 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08007074
7075#ifdef CONFIG_PM
7076 retval = pci_save_state(pdev);
7077 if (retval)
7078 return retval;
7079#endif
7080
7081 status = rd32(E1000_STATUS);
7082 if (status & E1000_STATUS_LU)
7083 wufc &= ~E1000_WUFC_LNKC;
7084
7085 if (wufc) {
7086 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00007087 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08007088
7089 /* turn on all-multi mode if wake on multicast is enabled */
7090 if (wufc & E1000_WUFC_MC) {
7091 rctl = rd32(E1000_RCTL);
7092 rctl |= E1000_RCTL_MPE;
7093 wr32(E1000_RCTL, rctl);
7094 }
7095
7096 ctrl = rd32(E1000_CTRL);
7097 /* advertise wake from D3Cold */
7098 #define E1000_CTRL_ADVD3WUC 0x00100000
7099 /* phy power management enable */
7100 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
7101 ctrl |= E1000_CTRL_ADVD3WUC;
7102 wr32(E1000_CTRL, ctrl);
7103
Auke Kok9d5c8242008-01-24 02:22:38 -08007104 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00007105 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08007106
7107 wr32(E1000_WUC, E1000_WUC_PME_EN);
7108 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08007109 } else {
7110 wr32(E1000_WUC, 0);
7111 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08007112 }
7113
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007114 *enable_wake = wufc || adapter->en_mng_pt;
7115 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00007116 igb_power_down_link(adapter);
7117 else
7118 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08007119
7120 /* Release control of h/w to f/w. If f/w is AMT enabled, this
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007121 * would have already happened in close and is redundant.
7122 */
Auke Kok9d5c8242008-01-24 02:22:38 -08007123 igb_release_hw_control(adapter);
7124
7125 pci_disable_device(pdev);
7126
Auke Kok9d5c8242008-01-24 02:22:38 -08007127 return 0;
7128}
7129
7130#ifdef CONFIG_PM
Emil Tantilovd9dd9662012-01-28 08:10:35 +00007131#ifdef CONFIG_PM_SLEEP
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007132static int igb_suspend(struct device *dev)
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007133{
7134 int retval;
7135 bool wake;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007136 struct pci_dev *pdev = to_pci_dev(dev);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007137
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007138 retval = __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007139 if (retval)
7140 return retval;
7141
7142 if (wake) {
7143 pci_prepare_to_sleep(pdev);
7144 } else {
7145 pci_wake_from_d3(pdev, false);
7146 pci_set_power_state(pdev, PCI_D3hot);
7147 }
7148
7149 return 0;
7150}
Emil Tantilovd9dd9662012-01-28 08:10:35 +00007151#endif /* CONFIG_PM_SLEEP */
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007152
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007153static int igb_resume(struct device *dev)
Auke Kok9d5c8242008-01-24 02:22:38 -08007154{
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007155 struct pci_dev *pdev = to_pci_dev(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08007156 struct net_device *netdev = pci_get_drvdata(pdev);
7157 struct igb_adapter *adapter = netdev_priv(netdev);
7158 struct e1000_hw *hw = &adapter->hw;
7159 u32 err;
7160
7161 pci_set_power_state(pdev, PCI_D0);
7162 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00007163 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09007164
Alexander Duyckaed5dec2009-02-06 23:16:04 +00007165 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08007166 if (err) {
7167 dev_err(&pdev->dev,
7168 "igb: Cannot enable PCI device from suspend\n");
7169 return err;
7170 }
7171 pci_set_master(pdev);
7172
7173 pci_enable_wake(pdev, PCI_D3hot, 0);
7174 pci_enable_wake(pdev, PCI_D3cold, 0);
7175
Stefan Assmann53c7d062012-12-04 06:00:12 +00007176 if (igb_init_interrupt_scheme(adapter, true)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07007177 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7178 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08007179 }
7180
Auke Kok9d5c8242008-01-24 02:22:38 -08007181 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00007182
7183 /* let the f/w know that the h/w is now under the control of the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007184 * driver.
7185 */
Alexander Duycka8564f02009-02-06 23:21:10 +00007186 igb_get_hw_control(adapter);
7187
Auke Kok9d5c8242008-01-24 02:22:38 -08007188 wr32(E1000_WUS, ~0);
7189
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007190 if (netdev->flags & IFF_UP) {
Alexander Duyck0c2cc022012-09-25 00:31:22 +00007191 rtnl_lock();
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007192 err = __igb_open(netdev, true);
Alexander Duyck0c2cc022012-09-25 00:31:22 +00007193 rtnl_unlock();
Alexander Duycka88f10e2008-07-08 15:13:38 -07007194 if (err)
7195 return err;
7196 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007197
7198 netif_device_attach(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007199 return 0;
7200}
7201
7202#ifdef CONFIG_PM_RUNTIME
7203static int igb_runtime_idle(struct device *dev)
7204{
7205 struct pci_dev *pdev = to_pci_dev(dev);
7206 struct net_device *netdev = pci_get_drvdata(pdev);
7207 struct igb_adapter *adapter = netdev_priv(netdev);
7208
7209 if (!igb_has_link(adapter))
7210 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
7211
7212 return -EBUSY;
7213}
7214
7215static int igb_runtime_suspend(struct device *dev)
7216{
7217 struct pci_dev *pdev = to_pci_dev(dev);
7218 int retval;
7219 bool wake;
7220
7221 retval = __igb_shutdown(pdev, &wake, 1);
7222 if (retval)
7223 return retval;
7224
7225 if (wake) {
7226 pci_prepare_to_sleep(pdev);
7227 } else {
7228 pci_wake_from_d3(pdev, false);
7229 pci_set_power_state(pdev, PCI_D3hot);
7230 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007231
Auke Kok9d5c8242008-01-24 02:22:38 -08007232 return 0;
7233}
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007234
7235static int igb_runtime_resume(struct device *dev)
7236{
7237 return igb_resume(dev);
7238}
7239#endif /* CONFIG_PM_RUNTIME */
Auke Kok9d5c8242008-01-24 02:22:38 -08007240#endif
7241
7242static void igb_shutdown(struct pci_dev *pdev)
7243{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007244 bool wake;
7245
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007246 __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007247
7248 if (system_state == SYSTEM_POWER_OFF) {
7249 pci_wake_from_d3(pdev, wake);
7250 pci_set_power_state(pdev, PCI_D3hot);
7251 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007252}
7253
Greg Rosefa44f2f2013-01-17 01:03:06 -08007254#ifdef CONFIG_PCI_IOV
7255static int igb_sriov_reinit(struct pci_dev *dev)
7256{
7257 struct net_device *netdev = pci_get_drvdata(dev);
7258 struct igb_adapter *adapter = netdev_priv(netdev);
7259 struct pci_dev *pdev = adapter->pdev;
7260
7261 rtnl_lock();
7262
7263 if (netif_running(netdev))
7264 igb_close(netdev);
7265
7266 igb_clear_interrupt_scheme(adapter);
7267
7268 igb_init_queue_configuration(adapter);
7269
7270 if (igb_init_interrupt_scheme(adapter, true)) {
7271 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		rtnl_unlock();
7272 return -ENOMEM;
7273 }
7274
7275 if (netif_running(netdev))
7276 igb_open(netdev);
7277
7278 rtnl_unlock();
7279
7280 return 0;
7281}
7282
7283static int igb_pci_disable_sriov(struct pci_dev *dev)
7284{
7285 int err = igb_disable_sriov(dev);
7286
7287 if (!err)
7288 err = igb_sriov_reinit(dev);
7289
7290 return err;
7291}
7292
7293static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
7294{
7295 int err = igb_enable_sriov(dev, num_vfs);
7296
7297 if (err)
7298 goto out;
7299
7300 err = igb_sriov_reinit(dev);
7301 if (!err)
7302 return num_vfs;
7303
7304out:
7305 return err;
7306}
7307
7308#endif
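/* .sriov_configure callback: a num_vfs of 0 disables SR-IOV, any other value
 * enables that many VFs.  Returns the number of VFs enabled or a negative
 * errno.
 */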
7309static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7310{
7311#ifdef CONFIG_PCI_IOV
7312 if (num_vfs == 0)
7313 return igb_pci_disable_sriov(dev);
7314 else
7315 return igb_pci_enable_sriov(dev, num_vfs);
7316#endif
7317 return 0;
7318}
7319
Auke Kok9d5c8242008-01-24 02:22:38 -08007320#ifdef CONFIG_NET_POLL_CONTROLLER
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007321/* Polling 'interrupt' - used by things like netconsole to send skbs
Auke Kok9d5c8242008-01-24 02:22:38 -08007322 * without having to re-enable interrupts. It's not called while
7323 * the interrupt routine is executing.
7324 */
7325static void igb_netpoll(struct net_device *netdev)
7326{
7327 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00007328 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00007329 struct igb_q_vector *q_vector;
Auke Kok9d5c8242008-01-24 02:22:38 -08007330 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08007331
Alexander Duyck047e0032009-10-27 15:49:27 +00007332 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00007333 q_vector = adapter->q_vector[i];
7334 if (adapter->msix_entries)
7335 wr32(E1000_EIMC, q_vector->eims_value);
7336 else
7337 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00007338 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00007339 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007340}
7341#endif /* CONFIG_NET_POLL_CONTROLLER */
7342
7343/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007344 * igb_io_error_detected - called when PCI error is detected
7345 * @pdev: Pointer to PCI device
7346 * @state: The current pci connection state
Auke Kok9d5c8242008-01-24 02:22:38 -08007347 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007348 * This function is called after a PCI bus error affecting
7349 * this device has been detected.
7350 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08007351static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7352 pci_channel_state_t state)
7353{
7354 struct net_device *netdev = pci_get_drvdata(pdev);
7355 struct igb_adapter *adapter = netdev_priv(netdev);
7356
7357 netif_device_detach(netdev);
7358
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00007359 if (state == pci_channel_io_perm_failure)
7360 return PCI_ERS_RESULT_DISCONNECT;
7361
Auke Kok9d5c8242008-01-24 02:22:38 -08007362 if (netif_running(netdev))
7363 igb_down(adapter);
7364 pci_disable_device(pdev);
7365
7366 /* Request a slot reset. */
7367 return PCI_ERS_RESULT_NEED_RESET;
7368}
7369
7370/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007371 * igb_io_slot_reset - called after the pci bus has been reset.
7372 * @pdev: Pointer to PCI device
Auke Kok9d5c8242008-01-24 02:22:38 -08007373 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007374 * Restart the card from scratch, as if from a cold-boot. Implementation
7375 * resembles the first-half of the igb_resume routine.
7376 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08007377static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7378{
7379 struct net_device *netdev = pci_get_drvdata(pdev);
7380 struct igb_adapter *adapter = netdev_priv(netdev);
7381 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08007382 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09007383 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08007384
Alexander Duyckaed5dec2009-02-06 23:16:04 +00007385 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08007386 dev_err(&pdev->dev,
7387 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08007388 result = PCI_ERS_RESULT_DISCONNECT;
7389 } else {
7390 pci_set_master(pdev);
7391 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00007392 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08007393
7394 pci_enable_wake(pdev, PCI_D3hot, 0);
7395 pci_enable_wake(pdev, PCI_D3cold, 0);
7396
7397 igb_reset(adapter);
7398 wr32(E1000_WUS, ~0);
7399 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08007400 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007401
Jeff Kirsherea943d42008-12-11 20:34:19 -08007402 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7403 if (err) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007404 dev_err(&pdev->dev,
7405 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7406 err);
Jeff Kirsherea943d42008-12-11 20:34:19 -08007407 /* non-fatal, continue */
7408 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007409
Alexander Duyck40a914f2008-11-27 00:24:37 -08007410 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08007411}
7412
7413/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007414 * igb_io_resume - called when traffic can start flowing again.
7415 * @pdev: Pointer to PCI device
Auke Kok9d5c8242008-01-24 02:22:38 -08007416 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007417 * This callback is called when the error recovery driver tells us that
7418 * it's OK to resume normal operation. Implementation resembles the
7419 * second-half of the igb_resume routine.
Auke Kok9d5c8242008-01-24 02:22:38 -08007420 */
7421static void igb_io_resume(struct pci_dev *pdev)
7422{
7423 struct net_device *netdev = pci_get_drvdata(pdev);
7424 struct igb_adapter *adapter = netdev_priv(netdev);
7425
Auke Kok9d5c8242008-01-24 02:22:38 -08007426 if (netif_running(netdev)) {
7427 if (igb_up(adapter)) {
7428 dev_err(&pdev->dev, "igb_up failed after reset\n");
7429 return;
7430 }
7431 }
7432
7433 netif_device_attach(netdev);
7434
7435 /* let the f/w know that the h/w is now under the control of the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007436 * driver.
7437 */
Auke Kok9d5c8242008-01-24 02:22:38 -08007438 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08007439}
7440
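/* Write a MAC address into the receive address register pair at @index,
 * encoding the owning pool in the queue-select bits of RAH and marking the
 * entry valid.
 */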
Alexander Duyck26ad9172009-10-05 06:32:49 +00007441static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007442 u8 qsel)
Alexander Duyck26ad9172009-10-05 06:32:49 +00007443{
7444 u32 rar_low, rar_high;
7445 struct e1000_hw *hw = &adapter->hw;
7446
7447 /* HW expects these in little endian so we reverse the byte order
7448 * from network order (big endian) to little endian
7449 */
7450 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007451 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
Alexander Duyck26ad9172009-10-05 06:32:49 +00007452 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
7453
7454 /* Indicate to hardware the Address is Valid. */
7455 rar_high |= E1000_RAH_AV;
7456
7457 if (hw->mac.type == e1000_82575)
7458 rar_high |= E1000_RAH_POOL_1 * qsel;
7459 else
7460 rar_high |= E1000_RAH_POOL_1 << qsel;
7461
7462 wr32(E1000_RAL(index), rar_low);
7463 wrfl();
7464 wr32(E1000_RAH(index), rar_high);
7465 wrfl();
7466}
7467
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007468static int igb_set_vf_mac(struct igb_adapter *adapter,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007469 int vf, unsigned char *mac_addr)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007470{
7471 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00007472 /* VF MAC addresses start at end of receive addresses and moves
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007473 * towards the first, as a result a collision should not be possible
7474 */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00007475 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007476
Alexander Duyck37680112009-02-19 20:40:30 -08007477 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007478
Alexander Duyck26ad9172009-10-05 06:32:49 +00007479 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007480
7481 return 0;
7482}
7483
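/* ndo_set_vf_mac callback: validate and program an administratively assigned
 * MAC address for a VF and flag it as set by the PF.
 */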
Williams, Mitch A8151d292010-02-10 01:44:24 +00007484static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7485{
7486 struct igb_adapter *adapter = netdev_priv(netdev);
7487 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
7488 return -EINVAL;
7489 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7490 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007491 dev_info(&adapter->pdev->dev,
7492 "Reload the VF driver to make this change effective.");
Williams, Mitch A8151d292010-02-10 01:44:24 +00007493 if (test_bit(__IGB_DOWN, &adapter->state)) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007494 dev_warn(&adapter->pdev->dev,
7495 "The VF MAC address has been set, but the PF device is not up.\n");
7496 dev_warn(&adapter->pdev->dev,
7497 "Bring the PF device up before attempting to use the VF device.\n");
Williams, Mitch A8151d292010-02-10 01:44:24 +00007498 }
7499 return igb_set_vf_mac(adapter, vf, mac);
7500}
7501
Lior Levy17dc5662011-02-08 02:28:46 +00007502static int igb_link_mbps(int internal_link_speed)
7503{
7504 switch (internal_link_speed) {
7505 case SPEED_100:
7506 return 100;
7507 case SPEED_1000:
7508 return 1000;
7509 default:
7510 return 0;
7511 }
7512}
7513
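/* Program the per-VF Tx rate limiter: derive the rate factor from the
 * requested rate and the current link speed and write it to RTTBCNRC for the
 * VF's queue.  A tx_rate of 0 disables the limiter.
 */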
7514static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7515 int link_speed)
7516{
7517 int rf_dec, rf_int;
7518 u32 bcnrc_val;
7519
7520 if (tx_rate != 0) {
7521 /* Calculate the rate factor values to set */
7522 rf_int = link_speed / tx_rate;
7523 rf_dec = (link_speed - (rf_int * tx_rate));
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007524 rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
7525 tx_rate;
Lior Levy17dc5662011-02-08 02:28:46 +00007526
7527 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007528 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
7529 E1000_RTTBCNRC_RF_INT_MASK);
Lior Levy17dc5662011-02-08 02:28:46 +00007530 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7531 } else {
7532 bcnrc_val = 0;
7533 }
7534
7535 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007536 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
Lior Levyf00b0da2011-06-04 06:05:03 +00007537 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7538 */
7539 wr32(E1000_RTTBCNRM, 0x14);
Lior Levy17dc5662011-02-08 02:28:46 +00007540 wr32(E1000_RTTBCNRC, bcnrc_val);
7541}
7542
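/* Per-VF rate limits are programmed relative to the link speed, so when the
 * link speed changes the stored limits are cleared and the hardware limiters
 * reprogrammed (effectively disabling them until they are set again).
 */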
7543static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7544{
7545 int actual_link_speed, i;
7546 bool reset_rate = false;
7547
7548 /* VF TX rate limit was not set or not supported */
7549 if ((adapter->vf_rate_link_speed == 0) ||
7550 (adapter->hw.mac.type != e1000_82576))
7551 return;
7552
7553 actual_link_speed = igb_link_mbps(adapter->link_speed);
7554 if (actual_link_speed != adapter->vf_rate_link_speed) {
7555 reset_rate = true;
7556 adapter->vf_rate_link_speed = 0;
7557 dev_info(&adapter->pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007558 "Link speed has been changed. VF Transmit rate is disabled\n");
Lior Levy17dc5662011-02-08 02:28:46 +00007559 }
7560
7561 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7562 if (reset_rate)
7563 adapter->vf_data[i].tx_rate = 0;
7564
7565 igb_set_vf_rate_limit(&adapter->hw, i,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007566 adapter->vf_data[i].tx_rate,
7567 actual_link_speed);
Lior Levy17dc5662011-02-08 02:28:46 +00007568 }
7569}
7570
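/* ndo_set_vf_tx_rate callback: per-VF rate limiting is only supported on the
 * 82576, requires link to be up and rejects rates above the link speed.
 */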
Williams, Mitch A8151d292010-02-10 01:44:24 +00007571static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7572{
Lior Levy17dc5662011-02-08 02:28:46 +00007573 struct igb_adapter *adapter = netdev_priv(netdev);
7574 struct e1000_hw *hw = &adapter->hw;
7575 int actual_link_speed;
7576
7577 if (hw->mac.type != e1000_82576)
7578 return -EOPNOTSUPP;
7579
7580 actual_link_speed = igb_link_mbps(adapter->link_speed);
7581 if ((vf >= adapter->vfs_allocated_count) ||
7582 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
7583 (tx_rate < 0) || (tx_rate > actual_link_speed))
7584 return -EINVAL;
7585
7586 adapter->vf_rate_link_speed = actual_link_speed;
7587 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
7588 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
7589
7590 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007591}
7592
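/* ndo_set_vf_spoofchk callback: toggle the MAC and VLAN anti-spoof check bits
 * for a VF in the DTXSWC (82576) or TXSWC register.
 */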
Lior Levy70ea4782013-03-03 20:27:48 +00007593static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
7594 bool setting)
7595{
7596 struct igb_adapter *adapter = netdev_priv(netdev);
7597 struct e1000_hw *hw = &adapter->hw;
7598 u32 reg_val, reg_offset;
7599
7600 if (!adapter->vfs_allocated_count)
7601 return -EOPNOTSUPP;
7602
7603 if (vf >= adapter->vfs_allocated_count)
7604 return -EINVAL;
7605
7606 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
7607 reg_val = rd32(reg_offset);
7608 if (setting)
7609 reg_val |= ((1 << vf) |
7610 (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7611 else
7612 reg_val &= ~((1 << vf) |
7613 (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7614 wr32(reg_offset, reg_val);
7615
7616 adapter->vf_data[vf].spoofchk_enabled = setting;
7617 return E1000_SUCCESS;
7618}
7619
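/* ndo_get_vf_config callback: report the VF's MAC address, VLAN, QoS, Tx rate
 * and spoof-check setting back to rtnetlink.
 */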
Williams, Mitch A8151d292010-02-10 01:44:24 +00007620static int igb_ndo_get_vf_config(struct net_device *netdev,
7621 int vf, struct ifla_vf_info *ivi)
7622{
7623 struct igb_adapter *adapter = netdev_priv(netdev);
7624 if (vf >= adapter->vfs_allocated_count)
7625 return -EINVAL;
7626 ivi->vf = vf;
7627 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00007628 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007629 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7630 ivi->qos = adapter->vf_data[vf].pf_qos;
Lior Levy70ea4782013-03-03 20:27:48 +00007631 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007632 return 0;
7633}
7634
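/* Configure VM-to-VM switching: on MACs that support it, enable VMDq
 * loopback, replication and anti-spoofing when VFs are allocated; otherwise
 * loopback and replication are turned off.
 */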
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007635static void igb_vmm_control(struct igb_adapter *adapter)
7636{
7637 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00007638 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007639
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007640 switch (hw->mac.type) {
7641 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00007642 case e1000_i210:
7643 case e1000_i211:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00007644 case e1000_i354:
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007645 default:
7646 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007647 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007648 case e1000_82576:
7649 /* notify HW that the MAC is adding vlan tags */
7650 reg = rd32(E1000_DTXCTL);
7651 reg |= E1000_DTXCTL_VLAN_ADDED;
7652 wr32(E1000_DTXCTL, reg);
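		/* fall through */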
7653 case e1000_82580:
7654 /* enable replication vlan tag stripping */
7655 reg = rd32(E1000_RPLOLR);
7656 reg |= E1000_RPLOLR_STRVLAN;
7657 wr32(E1000_RPLOLR, reg);
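		/* fall through */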
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00007658 case e1000_i350:
7659 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007660 break;
7661 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00007662
Alexander Duyckd4960302009-10-27 15:53:45 +00007663 if (adapter->vfs_allocated_count) {
7664 igb_vmdq_set_loopback_pf(hw, true);
7665 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00007666 igb_vmdq_set_anti_spoofing_pf(hw, true,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007667 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00007668 } else {
7669 igb_vmdq_set_loopback_pf(hw, false);
7670 igb_vmdq_set_replication_pf(hw, false);
7671 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007672}
7673
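/* Set up DMA Coalescing: on MACs newer than the 82580 program the high water
 * mark, Rx threshold and watchdog when the feature is enabled; on the 82580
 * itself make sure DMA Coalescing stays disabled.
 */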
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007674static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7675{
7676 struct e1000_hw *hw = &adapter->hw;
7677 u32 dmac_thr;
7678 u16 hwm;
7679
7680 if (hw->mac.type > e1000_82580) {
7681 if (adapter->flags & IGB_FLAG_DMAC) {
7682 u32 reg;
7683
7684 /* force threshold to 0. */
7685 wr32(E1000_DMCTXTH, 0);
7686
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007687 /* DMA Coalescing high water mark needs to be greater
Matthew Vicke8c626e2011-11-17 08:33:12 +00007688 * than the Rx threshold. Set hwm to PBA - max frame
7689 * size in 16B units, capping it at PBA - 6KB.
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007690 */
Matthew Vicke8c626e2011-11-17 08:33:12 +00007691 hwm = 64 * pba - adapter->max_frame_size / 16;
7692 if (hwm < 64 * (pba - 6))
7693 hwm = 64 * (pba - 6);
7694 reg = rd32(E1000_FCRTC);
7695 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
7696 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
7697 & E1000_FCRTC_RTH_COAL_MASK);
7698 wr32(E1000_FCRTC, reg);
7699
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007700 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
Matthew Vicke8c626e2011-11-17 08:33:12 +00007701 * frame size, capping it at PBA - 10KB.
7702 */
7703 dmac_thr = pba - adapter->max_frame_size / 512;
7704 if (dmac_thr < pba - 10)
7705 dmac_thr = pba - 10;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007706 reg = rd32(E1000_DMACR);
7707 reg &= ~E1000_DMACR_DMACTHR_MASK;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007708 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
7709 & E1000_DMACR_DMACTHR_MASK);
7710
7711 /* transition to L0s or L1 if available */
7712 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
7713
7714 /* watchdog timer = +-1000 usec in 32 usec intervals */
7715 reg |= (1000 >> 5);
Matthew Vick0c02dd92012-04-14 05:20:32 +00007716
7717 /* Disable BMC-to-OS Watchdog Enable */
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00007718 if (hw->mac.type != e1000_i354)
7719 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
7720
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007721 wr32(E1000_DMACR, reg);
7722
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007723 /* no lower threshold to disable
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007724 * coalescing (smart fifo) - UTRESH=0
7725 */
7726 wr32(E1000_DMCRTRH, 0);
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007727
7728 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
7729
7730 wr32(E1000_DMCTLX, reg);
7731
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007732 /* free space in tx packet buffer to wake from
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007733 * DMA coal
7734 */
7735 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7736 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7737
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007738 /* make low power state decision controlled
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007739 * by DMA coal
7740 */
7741 reg = rd32(E1000_PCIEMISC);
7742 reg &= ~E1000_PCIEMISC_LX_DECISION;
7743 wr32(E1000_PCIEMISC, reg);
7744 } /* endif adapter->dmac is not disabled */
7745 } else if (hw->mac.type == e1000_82580) {
7746 u32 reg = rd32(E1000_PCIEMISC);
7747 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
7748 wr32(E1000_DMACR, 0);
7749 }
7750}
7751
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007752/**
7753 * igb_read_i2c_byte - Reads 8 bit word over I2C
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007754 * @hw: pointer to hardware structure
7755 * @byte_offset: byte offset to read
7756 * @dev_addr: device address
7757 * @data: value read
7758 *
7759 * Performs byte read operation over I2C interface at
7760 * a specified device address.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007761 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007762s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007763 u8 dev_addr, u8 *data)
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007764{
7765 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00007766 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007767 s32 status;
7768 u16 swfw_mask = 0;
7769
7770 if (!this_client)
7771 return E1000_ERR_I2C;
7772
7773 swfw_mask = E1000_SWFW_PHY0_SM;
7774
7775 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
7776 != E1000_SUCCESS)
7777 return E1000_ERR_SWFW_SYNC;
7778
7779 status = i2c_smbus_read_byte_data(this_client, byte_offset);
7780 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
7781
7782 if (status < 0)
7783 return E1000_ERR_I2C;
7784 else {
7785 *data = status;
7786 return E1000_SUCCESS;
7787 }
7788}
7789
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007790/**
7791 * igb_write_i2c_byte - Writes 8 bit word over I2C
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007792 * @hw: pointer to hardware structure
7793 * @byte_offset: byte offset to write
7794 * @dev_addr: device address
7795 * @data: value to write
7796 *
7797 * Performs byte write operation over I2C interface at
7798 * a specified device address.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007799 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007800s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007801 u8 dev_addr, u8 data)
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007802{
7803 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00007804 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007805 s32 status;
7806 u16 swfw_mask = E1000_SWFW_PHY0_SM;
7807
7808 if (!this_client)
7809 return E1000_ERR_I2C;
7810
7811 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
7812 return E1000_ERR_SWFW_SYNC;
7813 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
7814 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
7815
7816 if (status)
7817 return E1000_ERR_I2C;
7818 else
7819 return E1000_SUCCESS;
7820
7821}
Auke Kok9d5c8242008-01-24 02:22:38 -08007822/* igb_main.c */