/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
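
/* With MAJ = 5, MIN = 4 and BUILD = 0 above, DRV_VERSION expands to the
 * string "5.4.0-k"; the "-k" suffix conventionally marks the in-kernel
 * build of the driver, as opposed to Intel's out-of-tree releases.
 */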

enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
				"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
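
/* MODULE_DEVICE_TABLE() exports igb_pci_tbl in the module image so that
 * depmod/udev can match the PCI vendor:device IDs listed above and
 * autoload this driver when a supported NIC is enumerated.
 */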

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif

static int igb_suspend(struct device *);
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
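/* Note: the default of -1 is deliberately outside the documented 0..16
 * range; netif_msg_init() treats an out-of-range value as "use the
 * driver default", i.e. the DEFAULT_MSG_ENABLE mask defined above.
 */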

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
345 case E1000_TDBAL(0):
346 for (n = 0; n < 4; n++)
347 regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
					  DUMP_PREFIX_ADDRESS,
					  16, 1,
					  page_address(buffer_info->page) +
						      buffer_info->page_offset,
					  igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}

/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: pointer to hardware structure
 *
 *  Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}

/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: pointer to hardware structure
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: pointer to hardware structure
 *  @state: state to set clock
 *
 *  Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: pointer to hardware structure
 *
 *  Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
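
/* The MAC only exposes raw SDA/SCL line control through I2CPARAMS, so the
 * driver bit-bangs the protocol via the i2c-algo-bit helpers above (used,
 * e.g., to reach the external thermal sensor on i350 adapters).  With
 * .udelay = 5 each half clock period is 5 us, i.e. roughly a 100 kHz SCL;
 * .timeout = 20 jiffies bounds waits for a slave stretching the clock.
 */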

/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
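
/* For reference, the interleave the macro above produces: Q_IDX_82576(0)=0,
 * Q_IDX_82576(1)=8, Q_IDX_82576(2)=1, Q_IDX_82576(3)=9, ... matching the
 * "VF 0 gets queues 0 and 8, VF 1 gets queues 1 and 9" layout described
 * in igb_cache_ring_register() below.
 */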
/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Fall through */
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;
		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset in IVAR, should be multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing a cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
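
/* Worked example for the row-major layout used on 82580 and newer parts in
 * igb_assign_vector() below: rx_queue 5 gives index = 5 >> 1 = 2 and
 * offset = (5 & 0x1) << 4 = 16, so the vector's cause-allocation byte is
 * written to bits 23:16 of the third IVAR register.
 */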
809
Auke Kok9d5c8242008-01-24 02:22:38 -0800810#define IGB_N0_QUEUE -1
Alexander Duyck047e0032009-10-27 15:49:27 +0000811static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -0800812{
Alexander Duyck047e0032009-10-27 15:49:27 +0000813 struct igb_adapter *adapter = q_vector->adapter;
Auke Kok9d5c8242008-01-24 02:22:38 -0800814 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck047e0032009-10-27 15:49:27 +0000815 int rx_queue = IGB_N0_QUEUE;
816 int tx_queue = IGB_N0_QUEUE;
Alexander Duyck4be000c2011-08-26 07:45:52 +0000817 u32 msixbm = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +0000818
Alexander Duyck0ba82992011-08-26 07:45:47 +0000819 if (q_vector->rx.ring)
820 rx_queue = q_vector->rx.ring->reg_idx;
821 if (q_vector->tx.ring)
822 tx_queue = q_vector->tx.ring->reg_idx;
Alexander Duyck2d064c02008-07-08 15:10:12 -0700823
824 switch (hw->mac.type) {
825 case e1000_82575:
Auke Kok9d5c8242008-01-24 02:22:38 -0800826 /* The 82575 assigns vectors using a bitmask, which matches the
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000827 * bitmask for the EICR/EIMS/EIMC registers. To assign one
828 * or more queues to a vector, we write the appropriate bits
829 * into the MSIXBM register for that vector.
830 */
Alexander Duyck047e0032009-10-27 15:49:27 +0000831 if (rx_queue > IGB_N0_QUEUE)
Auke Kok9d5c8242008-01-24 02:22:38 -0800832 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
Alexander Duyck047e0032009-10-27 15:49:27 +0000833 if (tx_queue > IGB_N0_QUEUE)
Auke Kok9d5c8242008-01-24 02:22:38 -0800834 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
Carolyn Wybornycd14ef52013-12-10 07:58:34 +0000835 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
Alexander Duyckfeeb2722010-02-03 21:59:51 +0000836 msixbm |= E1000_EIMS_OTHER;
Auke Kok9d5c8242008-01-24 02:22:38 -0800837 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
Alexander Duyck047e0032009-10-27 15:49:27 +0000838 q_vector->eims_value = msixbm;
Alexander Duyck2d064c02008-07-08 15:10:12 -0700839 break;
840 case e1000_82576:
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000841 /* 82576 uses a table that essentially consists of 2 columns
Alexander Duyck4be000c2011-08-26 07:45:52 +0000842 * with 8 rows. The ordering is column-major so we use the
843 * lower 3 bits as the row index, and the 4th bit as the
844 * column offset.
845 */
846 if (rx_queue > IGB_N0_QUEUE)
847 igb_write_ivar(hw, msix_vector,
848 rx_queue & 0x7,
849 (rx_queue & 0x8) << 1);
850 if (tx_queue > IGB_N0_QUEUE)
851 igb_write_ivar(hw, msix_vector,
852 tx_queue & 0x7,
853 ((tx_queue & 0x8) << 1) + 8);
Jacob Kellera51d8c22016-04-13 16:08:28 -0700854 q_vector->eims_value = BIT(msix_vector);
Alexander Duyck2d064c02008-07-08 15:10:12 -0700855 break;
Alexander Duyck55cac242009-11-19 12:42:21 +0000856 case e1000_82580:
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +0000857 case e1000_i350:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +0000858 case e1000_i354:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +0000859 case e1000_i210:
860 case e1000_i211:
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000861 /* On 82580 and newer adapters the scheme is similar to 82576
Alexander Duyck4be000c2011-08-26 07:45:52 +0000862 * however instead of ordering column-major we have things
863 * ordered row-major. So we traverse the table by using
864 * bit 0 as the column offset, and the remaining bits as the
865 * row index.
866 */
867 if (rx_queue > IGB_N0_QUEUE)
868 igb_write_ivar(hw, msix_vector,
869 rx_queue >> 1,
870 (rx_queue & 0x1) << 4);
871 if (tx_queue > IGB_N0_QUEUE)
872 igb_write_ivar(hw, msix_vector,
873 tx_queue >> 1,
874 ((tx_queue & 0x1) << 4) + 8);
Jacob Kellera51d8c22016-04-13 16:08:28 -0700875 q_vector->eims_value = BIT(msix_vector);
Alexander Duyck55cac242009-11-19 12:42:21 +0000876 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -0700877 default:
878 BUG();
879 break;
880 }
Alexander Duyck26b39272010-02-17 01:00:41 +0000881
882 /* add q_vector eims value to global eims_enable_mask */
883 adapter->eims_enable_mask |= q_vector->eims_value;
884
885 /* configure q_vector to set itr on first interrupt */
886 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -0800887}
888
889/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000890 * igb_configure_msix - Configure MSI-X hardware
891 * @adapter: board private structure to initialize
Auke Kok9d5c8242008-01-24 02:22:38 -0800892 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000893 * igb_configure_msix sets up the hardware to properly
894 * generate MSI-X interrupts.
Auke Kok9d5c8242008-01-24 02:22:38 -0800895 **/
896static void igb_configure_msix(struct igb_adapter *adapter)
897{
898 u32 tmp;
899 int i, vector = 0;
900 struct e1000_hw *hw = &adapter->hw;
901
902 adapter->eims_enable_mask = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -0800903
904 /* set vector for other causes, i.e. link changes */
Alexander Duyck2d064c02008-07-08 15:10:12 -0700905 switch (hw->mac.type) {
906 case e1000_82575:
Auke Kok9d5c8242008-01-24 02:22:38 -0800907 tmp = rd32(E1000_CTRL_EXT);
908 /* enable MSI-X PBA support*/
909 tmp |= E1000_CTRL_EXT_PBA_CLR;
910
911 /* Auto-Mask interrupts upon ICR read. */
912 tmp |= E1000_CTRL_EXT_EIAME;
913 tmp |= E1000_CTRL_EXT_IRCA;
914
915 wr32(E1000_CTRL_EXT, tmp);
Alexander Duyck047e0032009-10-27 15:49:27 +0000916
917 /* enable msix_other interrupt */
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000918 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
PJ Waskiewicz844290e2008-06-27 11:00:39 -0700919 adapter->eims_other = E1000_EIMS_OTHER;
Auke Kok9d5c8242008-01-24 02:22:38 -0800920
Alexander Duyck2d064c02008-07-08 15:10:12 -0700921 break;
922
923 case e1000_82576:
Alexander Duyck55cac242009-11-19 12:42:21 +0000924 case e1000_82580:
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +0000925 case e1000_i350:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +0000926 case e1000_i354:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +0000927 case e1000_i210:
928 case e1000_i211:
Alexander Duyck047e0032009-10-27 15:49:27 +0000929 /* Turn on MSI-X capability first, or our settings
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000930 * won't stick. And it will take days to debug.
931 */
Alexander Duyck047e0032009-10-27 15:49:27 +0000932 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000933 E1000_GPIE_PBA | E1000_GPIE_EIAME |
934 E1000_GPIE_NSICR);
Alexander Duyck2d064c02008-07-08 15:10:12 -0700935
Alexander Duyck047e0032009-10-27 15:49:27 +0000936 /* enable msix_other interrupt */
Jacob Kellera51d8c22016-04-13 16:08:28 -0700937 adapter->eims_other = BIT(vector);
Alexander Duyck047e0032009-10-27 15:49:27 +0000938 tmp = (vector++ | E1000_IVAR_VALID) << 8;
939
940 wr32(E1000_IVAR_MISC, tmp);
Alexander Duyck2d064c02008-07-08 15:10:12 -0700941 break;
942 default:
943 /* do nothing, since nothing else supports MSI-X */
944 break;
945 } /* switch (hw->mac.type) */
Alexander Duyck047e0032009-10-27 15:49:27 +0000946
947 adapter->eims_enable_mask |= adapter->eims_other;
948
Alexander Duyck26b39272010-02-17 01:00:41 +0000949 for (i = 0; i < adapter->num_q_vectors; i++)
950 igb_assign_vector(adapter->q_vector[i], vector++);
Alexander Duyck047e0032009-10-27 15:49:27 +0000951
Auke Kok9d5c8242008-01-24 02:22:38 -0800952 wrfl();
953}
954
955/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000956 * igb_request_msix - Initialize MSI-X interrupts
957 * @adapter: board private structure to initialize
Auke Kok9d5c8242008-01-24 02:22:38 -0800958 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000959 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
960 * kernel.
Auke Kok9d5c8242008-01-24 02:22:38 -0800961 **/
962static int igb_request_msix(struct igb_adapter *adapter)
963{
964 struct net_device *netdev = adapter->netdev;
Stefan Assmann52285b72012-12-04 06:00:17 +0000965 int i, err = 0, vector = 0, free_vector = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -0800966
Auke Kok9d5c8242008-01-24 02:22:38 -0800967 err = request_irq(adapter->msix_entries[vector].vector,
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000968 igb_msix_other, 0, netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -0800969 if (err)
Stefan Assmann52285b72012-12-04 06:00:17 +0000970 goto err_out;
Alexander Duyck047e0032009-10-27 15:49:27 +0000971
972 for (i = 0; i < adapter->num_q_vectors; i++) {
973 struct igb_q_vector *q_vector = adapter->q_vector[i];
974
Stefan Assmann52285b72012-12-04 06:00:17 +0000975 vector++;
976
Jarod Wilson7b06a692015-10-19 11:52:04 -0400977 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
Alexander Duyck047e0032009-10-27 15:49:27 +0000978
Alexander Duyck0ba82992011-08-26 07:45:47 +0000979 if (q_vector->rx.ring && q_vector->tx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000980 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000981 q_vector->rx.ring->queue_index);
982 else if (q_vector->tx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000983 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000984 q_vector->tx.ring->queue_index);
985 else if (q_vector->rx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000986 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000987 q_vector->rx.ring->queue_index);
Alexander Duyck047e0032009-10-27 15:49:27 +0000988 else
989 sprintf(q_vector->name, "%s-unused", netdev->name);
990
991 err = request_irq(adapter->msix_entries[vector].vector,
Jeff Kirsherb980ac12013-02-23 07:29:56 +0000992 igb_msix_ring, 0, q_vector->name,
993 q_vector);
Alexander Duyck047e0032009-10-27 15:49:27 +0000994 if (err)
Stefan Assmann52285b72012-12-04 06:00:17 +0000995 goto err_free;
Alexander Duyck047e0032009-10-27 15:49:27 +0000996 }
Auke Kok9d5c8242008-01-24 02:22:38 -0800997
Auke Kok9d5c8242008-01-24 02:22:38 -0800998 igb_configure_msix(adapter);
999 return 0;
Stefan Assmann52285b72012-12-04 06:00:17 +00001000
1001err_free:
1002 /* free already assigned IRQs */
1003 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
1004
1005 vector--;
1006 for (i = 0; i < vector; i++) {
1007 free_irq(adapter->msix_entries[free_vector++].vector,
1008 adapter->q_vector[i]);
1009 }
1010err_out:
Auke Kok9d5c8242008-01-24 02:22:38 -08001011 return err;
1012}
1013
Alexander Duyck047e0032009-10-27 15:49:27 +00001014/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001015 * igb_free_q_vector - Free memory allocated for specific interrupt vector
1016 * @adapter: board private structure to initialize
1017 * @v_idx: Index of vector to be freed
Alexander Duyck5536d212012-09-25 00:31:17 +00001018 *
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001019 * This function frees the memory allocated to the q_vector.
Alexander Duyck5536d212012-09-25 00:31:17 +00001020 **/
1021static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1022{
1023 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1024
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001025 adapter->q_vector[v_idx] = NULL;
1026
1027 /* igb_get_stats64() might access the rings on this vector,
1028 * we must wait a grace period before freeing it.
1029 */
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08001030 if (q_vector)
1031 kfree_rcu(q_vector, rcu);
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001032}
1033
1034/**
1035 * igb_reset_q_vector - Reset config for interrupt vector
1036 * @adapter: board private structure to initialize
1037 * @v_idx: Index of vector to be reset
1038 *
1039 * If NAPI is enabled it will delete any references to the
1040 * NAPI struct. This is preparation for igb_free_q_vector.
1041 **/
1042static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1043{
1044 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1045
Christoph Paaschcb06d102014-03-21 03:48:19 -07001046 /* Coming from igb_set_interrupt_capability, the vectors are not yet
1047 * allocated. So, q_vector is NULL so we should stop here.
1048 */
1049 if (!q_vector)
1050 return;
1051
Alexander Duyck5536d212012-09-25 00:31:17 +00001052 if (q_vector->tx.ring)
1053 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1054
1055 if (q_vector->rx.ring)
Toshiaki Makita2439fc42015-04-13 18:15:11 +09001056 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
Alexander Duyck5536d212012-09-25 00:31:17 +00001057
Alexander Duyck5536d212012-09-25 00:31:17 +00001058 netif_napi_del(&q_vector->napi);
1059
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001060}
1061
1062static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1063{
1064 int v_idx = adapter->num_q_vectors;
1065
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001066 if (adapter->flags & IGB_FLAG_HAS_MSIX)
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001067 pci_disable_msix(adapter->pdev);
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001068 else if (adapter->flags & IGB_FLAG_HAS_MSI)
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001069 pci_disable_msi(adapter->pdev);
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001070
1071 while (v_idx--)
1072 igb_reset_q_vector(adapter, v_idx);
Alexander Duyck5536d212012-09-25 00:31:17 +00001073}
1074
1075/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001076 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1077 * @adapter: board private structure to initialize
Alexander Duyck047e0032009-10-27 15:49:27 +00001078 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001079 * This function frees the memory allocated to the q_vectors. In addition if
1080 * NAPI is enabled it will delete any references to the NAPI struct prior
1081 * to freeing the q_vector.
Alexander Duyck047e0032009-10-27 15:49:27 +00001082 **/
1083static void igb_free_q_vectors(struct igb_adapter *adapter)
1084{
Alexander Duyck5536d212012-09-25 00:31:17 +00001085 int v_idx = adapter->num_q_vectors;
Alexander Duyck047e0032009-10-27 15:49:27 +00001086
Alexander Duyck5536d212012-09-25 00:31:17 +00001087 adapter->num_tx_queues = 0;
1088 adapter->num_rx_queues = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00001089 adapter->num_q_vectors = 0;
Alexander Duyck5536d212012-09-25 00:31:17 +00001090
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001091 while (v_idx--) {
1092 igb_reset_q_vector(adapter, v_idx);
Alexander Duyck5536d212012-09-25 00:31:17 +00001093 igb_free_q_vector(adapter, v_idx);
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001094 }
Alexander Duyck047e0032009-10-27 15:49:27 +00001095}
1096
1097/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001098 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1099 * @adapter: board private structure to initialize
Alexander Duyck047e0032009-10-27 15:49:27 +00001100 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001101 * This function resets the device so that it has 0 Rx queues, Tx queues, and
1102 * MSI-X interrupts allocated.
Alexander Duyck047e0032009-10-27 15:49:27 +00001103 */
1104static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1105{
Alexander Duyck047e0032009-10-27 15:49:27 +00001106 igb_free_q_vectors(adapter);
1107 igb_reset_interrupt_capability(adapter);
1108}
Auke Kok9d5c8242008-01-24 02:22:38 -08001109
1110/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001111 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1112 * @adapter: board private structure to initialize
1113 * @msix: boolean value of MSIX capability
Auke Kok9d5c8242008-01-24 02:22:38 -08001114 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001115 * Attempt to configure interrupts using the best available
1116 * capabilities of the hardware and kernel.
Auke Kok9d5c8242008-01-24 02:22:38 -08001117 **/
Stefan Assmann53c7d062012-12-04 06:00:12 +00001118static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
Auke Kok9d5c8242008-01-24 02:22:38 -08001119{
1120 int err;
1121 int numvecs, i;
1122
Stefan Assmann53c7d062012-12-04 06:00:12 +00001123 if (!msix)
1124 goto msi_only;
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001125 adapter->flags |= IGB_FLAG_HAS_MSIX;
Stefan Assmann53c7d062012-12-04 06:00:12 +00001126
Alexander Duyck83b71802009-02-06 23:15:45 +00001127 /* Number of supported queues. */
Alexander Duycka99955f2009-11-12 18:37:19 +00001128 adapter->num_rx_queues = adapter->rss_queues;
Greg Rose5fa85172010-07-01 13:38:16 +00001129 if (adapter->vfs_allocated_count)
1130 adapter->num_tx_queues = 1;
1131 else
1132 adapter->num_tx_queues = adapter->rss_queues;
Alexander Duyck83b71802009-02-06 23:15:45 +00001133
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001134 /* start with one vector for every Rx queue */
Alexander Duyck047e0032009-10-27 15:49:27 +00001135 numvecs = adapter->num_rx_queues;
1136
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001137 /* if Tx handler is separate add 1 for every Tx queue */
Alexander Duycka99955f2009-11-12 18:37:19 +00001138 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1139 numvecs += adapter->num_tx_queues;
Alexander Duyck047e0032009-10-27 15:49:27 +00001140
1141 /* store the number of vectors reserved for queues */
1142 adapter->num_q_vectors = numvecs;
1143
1144 /* add 1 vector for link status interrupts */
1145 numvecs++;
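	/* Illustrative count (values assumed): with rss_queues = 4 and
	 * IGB_FLAG_QUEUE_PAIRS set, Tx and Rx share vectors, so
	 * numvecs = 4 queue vectors + 1 link vector = 5; with pairing
	 * disabled it would be 4 Rx + 4 Tx + 1 link = 9 MSI-X entries.
	 */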
Auke Kok9d5c8242008-01-24 02:22:38 -08001146 for (i = 0; i < numvecs; i++)
1147 adapter->msix_entries[i].entry = i;
1148
Alexander Gordeev479d02d2014-02-18 11:11:43 +01001149 err = pci_enable_msix_range(adapter->pdev,
1150 adapter->msix_entries,
1151 numvecs,
1152 numvecs);
1153 if (err > 0)
Alexander Duyck0c2cc022012-09-25 00:31:22 +00001154 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08001155
1156 igb_reset_interrupt_capability(adapter);
1157
1158 /* If we can't do MSI-X, try MSI */
1159msi_only:
Christoph Paaschb7093232014-03-21 04:02:09 -07001160 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001161#ifdef CONFIG_PCI_IOV
 1162	/* disable SR-IOV for non-MSI-X configurations */
1163 if (adapter->vf_data) {
1164 struct e1000_hw *hw = &adapter->hw;
1165 /* disable iov and allow time for transactions to clear */
1166 pci_disable_sriov(adapter->pdev);
1167 msleep(500);
1168
Yury Kylulin4827cc32017-03-07 11:20:26 +03001169 kfree(adapter->vf_mac_list);
1170 adapter->vf_mac_list = NULL;
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001171 kfree(adapter->vf_data);
1172 adapter->vf_data = NULL;
1173 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00001174 wrfl();
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001175 msleep(100);
1176 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1177 }
1178#endif
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001179 adapter->vfs_allocated_count = 0;
Alexander Duycka99955f2009-11-12 18:37:19 +00001180 adapter->rss_queues = 1;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001181 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
Auke Kok9d5c8242008-01-24 02:22:38 -08001182 adapter->num_rx_queues = 1;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07001183 adapter->num_tx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001184 adapter->num_q_vectors = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001185 if (!pci_enable_msi(adapter->pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001186 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001187}
1188
Alexander Duyck5536d212012-09-25 00:31:17 +00001189static void igb_add_ring(struct igb_ring *ring,
1190 struct igb_ring_container *head)
1191{
1192 head->ring = ring;
1193 head->count++;
1194}
1195
1196/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001197 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1198 * @adapter: board private structure to initialize
1199 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1200 * @v_idx: index of vector in adapter struct
1201 * @txr_count: total number of Tx rings to allocate
1202 * @txr_idx: index of first Tx ring to allocate
1203 * @rxr_count: total number of Rx rings to allocate
1204 * @rxr_idx: index of first Rx ring to allocate
Alexander Duyck5536d212012-09-25 00:31:17 +00001205 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001206 * We allocate one q_vector. If allocation fails we return -ENOMEM.
Alexander Duyck5536d212012-09-25 00:31:17 +00001207 **/
1208static int igb_alloc_q_vector(struct igb_adapter *adapter,
1209 int v_count, int v_idx,
1210 int txr_count, int txr_idx,
1211 int rxr_count, int rxr_idx)
1212{
1213 struct igb_q_vector *q_vector;
1214 struct igb_ring *ring;
1215 int ring_count, size;
1216
1217 /* igb only supports 1 Tx and/or 1 Rx queue per vector */
1218 if (txr_count > 1 || rxr_count > 1)
1219 return -ENOMEM;
1220
1221 ring_count = txr_count + rxr_count;
1222 size = sizeof(struct igb_q_vector) +
1223 (sizeof(struct igb_ring) * ring_count);
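	/* The rings are carved out of the same allocation as the q_vector
	 * itself: they land in the ring[] array at the end of
	 * struct igb_q_vector, e.g. ring_count = 2 for a paired Tx/Rx
	 * vector means one block holding the q_vector plus two rings.
	 */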
1224
1225 /* allocate q_vector and rings */
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001226 q_vector = adapter->q_vector[v_idx];
Shota Suzuki72ddef02015-07-01 09:25:52 +09001227 if (!q_vector) {
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001228 q_vector = kzalloc(size, GFP_KERNEL);
Shota Suzuki72ddef02015-07-01 09:25:52 +09001229 } else if (size > ksize(q_vector)) {
1230 kfree_rcu(q_vector, rcu);
1231 q_vector = kzalloc(size, GFP_KERNEL);
1232 } else {
Toshiaki Makitac0a06ee2015-04-13 18:15:10 +09001233 memset(q_vector, 0, size);
Shota Suzuki72ddef02015-07-01 09:25:52 +09001234 }
Alexander Duyck5536d212012-09-25 00:31:17 +00001235 if (!q_vector)
1236 return -ENOMEM;
1237
1238 /* initialize NAPI */
1239 netif_napi_add(adapter->netdev, &q_vector->napi,
1240 igb_poll, 64);
1241
1242 /* tie q_vector and adapter together */
1243 adapter->q_vector[v_idx] = q_vector;
1244 q_vector->adapter = adapter;
1245
1246 /* initialize work limits */
1247 q_vector->tx.work_limit = adapter->tx_work_limit;
1248
1249 /* initialize ITR configuration */
Jarod Wilson7b06a692015-10-19 11:52:04 -04001250 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
Alexander Duyck5536d212012-09-25 00:31:17 +00001251 q_vector->itr_val = IGB_START_ITR;
1252
1253 /* initialize pointer to rings */
1254 ring = q_vector->ring;
1255
Alexander Duyck4e2276672013-02-12 02:31:01 +00001256	/* initialize ITR */
1257 if (rxr_count) {
1258 /* rx or rx/tx vector */
1259 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1260 q_vector->itr_val = adapter->rx_itr_setting;
1261 } else {
1262 /* tx only vector */
1263 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1264 q_vector->itr_val = adapter->tx_itr_setting;
1265 }
1266
Alexander Duyck5536d212012-09-25 00:31:17 +00001267 if (txr_count) {
1268 /* assign generic ring traits */
1269 ring->dev = &adapter->pdev->dev;
1270 ring->netdev = adapter->netdev;
1271
1272 /* configure backlink on ring */
1273 ring->q_vector = q_vector;
1274
1275 /* update q_vector Tx values */
1276 igb_add_ring(ring, &q_vector->tx);
1277
1278 /* For 82575, context index must be unique per ring. */
1279 if (adapter->hw.mac.type == e1000_82575)
1280 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1281
1282 /* apply Tx specific ring traits */
1283 ring->count = adapter->tx_ring_count;
1284 ring->queue_index = txr_idx;
1285
Andre Guedes05f9d3e2017-10-16 18:01:28 -07001286 ring->cbs_enable = false;
1287 ring->idleslope = 0;
1288 ring->sendslope = 0;
1289 ring->hicredit = 0;
1290 ring->locredit = 0;
1291
John Stultz827da442013-10-07 15:51:58 -07001292 u64_stats_init(&ring->tx_syncp);
1293 u64_stats_init(&ring->tx_syncp2);
1294
Alexander Duyck5536d212012-09-25 00:31:17 +00001295 /* assign ring to adapter */
1296 adapter->tx_ring[txr_idx] = ring;
1297
1298 /* push pointer to next ring */
1299 ring++;
1300 }
1301
1302 if (rxr_count) {
1303 /* assign generic ring traits */
1304 ring->dev = &adapter->pdev->dev;
1305 ring->netdev = adapter->netdev;
1306
1307 /* configure backlink on ring */
1308 ring->q_vector = q_vector;
1309
1310 /* update q_vector Rx values */
1311 igb_add_ring(ring, &q_vector->rx);
1312
1313 /* set flag indicating ring supports SCTP checksum offload */
1314 if (adapter->hw.mac.type >= e1000_82576)
1315 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1316
Carolyn Wybornye52c0f92014-04-11 01:46:06 +00001317 /* On i350, i354, i210, and i211, loopback VLAN packets
Alexander Duyck5536d212012-09-25 00:31:17 +00001318 * have the tag byte-swapped.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001319 */
Alexander Duyck5536d212012-09-25 00:31:17 +00001320 if (adapter->hw.mac.type >= e1000_i350)
1321 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1322
1323 /* apply Rx specific ring traits */
1324 ring->count = adapter->rx_ring_count;
1325 ring->queue_index = rxr_idx;
1326
John Stultz827da442013-10-07 15:51:58 -07001327 u64_stats_init(&ring->rx_syncp);
1328
Alexander Duyck5536d212012-09-25 00:31:17 +00001329 /* assign ring to adapter */
1330 adapter->rx_ring[rxr_idx] = ring;
1331 }
1332
1333 return 0;
1334}
 1335
Auke Kok9d5c8242008-01-24 02:22:38 -08001337/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001338 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1339 * @adapter: board private structure to initialize
Alexander Duyck047e0032009-10-27 15:49:27 +00001340 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001341 * We allocate one q_vector per queue interrupt. If allocation fails we
1342 * return -ENOMEM.
Alexander Duyck047e0032009-10-27 15:49:27 +00001343 **/
1344static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1345{
Alexander Duyck5536d212012-09-25 00:31:17 +00001346 int q_vectors = adapter->num_q_vectors;
1347 int rxr_remaining = adapter->num_rx_queues;
1348 int txr_remaining = adapter->num_tx_queues;
1349 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1350 int err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001351
Alexander Duyck5536d212012-09-25 00:31:17 +00001352 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1353 for (; rxr_remaining; v_idx++) {
1354 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1355 0, 0, 1, rxr_idx);
1356
1357 if (err)
1358 goto err_out;
1359
1360 /* update counts and index */
1361 rxr_remaining--;
1362 rxr_idx++;
1363 }
1364 }
1365
1366 for (; v_idx < q_vectors; v_idx++) {
1367 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1368 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001369
Alexander Duyck5536d212012-09-25 00:31:17 +00001370 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1371 tqpv, txr_idx, rqpv, rxr_idx);
1372
1373 if (err)
Alexander Duyck047e0032009-10-27 15:49:27 +00001374 goto err_out;
Alexander Duyck5536d212012-09-25 00:31:17 +00001375
1376 /* update counts and index */
1377 rxr_remaining -= rqpv;
1378 txr_remaining -= tqpv;
1379 rxr_idx++;
1380 txr_idx++;
Alexander Duyck047e0032009-10-27 15:49:27 +00001381 }
Alexander Duyck81c2fc22011-08-26 07:45:20 +00001382
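	/* Worked example (queue counts assumed): with 8 q_vectors for
	 * 4 Rx + 4 Tx queues, the first loop creates 4 Rx-only vectors and
	 * this loop creates 4 Tx-only ones (rqpv = 0, tqpv = 1). With only
	 * 4 q_vectors for the same queues, the first loop is skipped and
	 * each pass here pairs one Rx and one Tx ring (rqpv = tqpv = 1).
	 */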
Alexander Duyck047e0032009-10-27 15:49:27 +00001383 return 0;
1384
1385err_out:
Alexander Duyck5536d212012-09-25 00:31:17 +00001386 adapter->num_tx_queues = 0;
1387 adapter->num_rx_queues = 0;
1388 adapter->num_q_vectors = 0;
1389
1390 while (v_idx--)
1391 igb_free_q_vector(adapter, v_idx);
1392
Alexander Duyck047e0032009-10-27 15:49:27 +00001393 return -ENOMEM;
1394}
1395
Alexander Duyck047e0032009-10-27 15:49:27 +00001396/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001397 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1398 * @adapter: board private structure to initialize
1399 * @msix: boolean value of MSIX capability
Alexander Duyck047e0032009-10-27 15:49:27 +00001400 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001401 * This function initializes the interrupts and allocates all of the queues.
Alexander Duyck047e0032009-10-27 15:49:27 +00001402 **/
Stefan Assmann53c7d062012-12-04 06:00:12 +00001403static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
Alexander Duyck047e0032009-10-27 15:49:27 +00001404{
1405 struct pci_dev *pdev = adapter->pdev;
1406 int err;
1407
Stefan Assmann53c7d062012-12-04 06:00:12 +00001408 igb_set_interrupt_capability(adapter, msix);
Alexander Duyck047e0032009-10-27 15:49:27 +00001409
1410 err = igb_alloc_q_vectors(adapter);
1411 if (err) {
1412 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1413 goto err_alloc_q_vectors;
1414 }
1415
Alexander Duyck5536d212012-09-25 00:31:17 +00001416 igb_cache_ring_register(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001417
1418 return 0;
Alexander Duyck5536d212012-09-25 00:31:17 +00001419
Alexander Duyck047e0032009-10-27 15:49:27 +00001420err_alloc_q_vectors:
1421 igb_reset_interrupt_capability(adapter);
1422 return err;
1423}
1424
1425/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001426 * igb_request_irq - initialize interrupts
1427 * @adapter: board private structure to initialize
Auke Kok9d5c8242008-01-24 02:22:38 -08001428 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001429 * Attempts to configure interrupts using the best available
1430 * capabilities of the hardware and kernel.
Auke Kok9d5c8242008-01-24 02:22:38 -08001431 **/
1432static int igb_request_irq(struct igb_adapter *adapter)
1433{
1434 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001435 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001436 int err = 0;
1437
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001438 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001439 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001440 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001441 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001442 /* fall back to MSI */
Alexander Duyck5536d212012-09-25 00:31:17 +00001443 igb_free_all_tx_resources(adapter);
1444 igb_free_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001445
Alexander Duyck047e0032009-10-27 15:49:27 +00001446 igb_clear_interrupt_scheme(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001447 err = igb_init_interrupt_scheme(adapter, false);
1448 if (err)
Alexander Duyck047e0032009-10-27 15:49:27 +00001449 goto request_done;
Stefan Assmann53c7d062012-12-04 06:00:12 +00001450
Alexander Duyck047e0032009-10-27 15:49:27 +00001451 igb_setup_all_tx_resources(adapter);
1452 igb_setup_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001453 igb_configure(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001454 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001455
Alexander Duyckc74d5882011-08-26 07:46:45 +00001456 igb_assign_vector(adapter->q_vector[0], 0);
1457
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001458 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001459 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001460 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001461 if (!err)
1462 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001463
Auke Kok9d5c8242008-01-24 02:22:38 -08001464 /* fall back to legacy interrupts */
1465 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001466 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001467 }
1468
Alexander Duyckc74d5882011-08-26 07:46:45 +00001469 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001470 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001471
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001472 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001473 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001474 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001475
1476request_done:
1477 return err;
1478}
1479
1480static void igb_free_irq(struct igb_adapter *adapter)
1481{
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001482 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001483 int vector = 0, i;
1484
Alexander Duyck047e0032009-10-27 15:49:27 +00001485 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001486
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001487 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001488 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001489 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001490 } else {
1491 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001492 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001493}
1494
1495/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001496 * igb_irq_disable - Mask off interrupt generation on the NIC
1497 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001498 **/
1499static void igb_irq_disable(struct igb_adapter *adapter)
1500{
1501 struct e1000_hw *hw = &adapter->hw;
1502
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001503	/* we need to be careful when disabling interrupts. The VFs are also
Alexander Duyck25568a52009-10-27 23:49:59 +00001504	 * mapped into these registers, so clearing the bits can cause
 1505	 * issues for the VF drivers; we only need to clear what we set
1506 */
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001507 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001508 u32 regval = rd32(E1000_EIAM);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001509
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001510 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1511 wr32(E1000_EIMC, adapter->eims_enable_mask);
1512 regval = rd32(E1000_EIAC);
1513 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001514 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001515
1516 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001517 wr32(E1000_IMC, ~0);
1518 wrfl();
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001519 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Emil Tantilov81a61852010-08-02 14:40:52 +00001520 int i;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001521
Emil Tantilov81a61852010-08-02 14:40:52 +00001522 for (i = 0; i < adapter->num_q_vectors; i++)
1523 synchronize_irq(adapter->msix_entries[i].vector);
1524 } else {
1525 synchronize_irq(adapter->pdev->irq);
1526 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001527}
1528
1529/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001530 * igb_irq_enable - Enable default interrupt generation settings
1531 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001532 **/
1533static void igb_irq_enable(struct igb_adapter *adapter)
1534{
1535 struct e1000_hw *hw = &adapter->hw;
1536
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001537 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001538 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001539 u32 regval = rd32(E1000_EIAC);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001540
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001541 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1542 regval = rd32(E1000_EIAM);
1543 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001544 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001545 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001546 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001547 ims |= E1000_IMS_VMMB;
1548 }
1549 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001550 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001551 wr32(E1000_IMS, IMS_ENABLE_MASK |
1552 E1000_IMS_DRSTA);
1553 wr32(E1000_IAM, IMS_ENABLE_MASK |
1554 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001555 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001556}
1557
1558static void igb_update_mng_vlan(struct igb_adapter *adapter)
1559{
Alexander Duyck51466232009-10-27 23:47:35 +00001560 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08001561 u16 pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08001562 u16 vid = adapter->hw.mng_cookie.vlan_id;
1563 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001564
Alexander Duyck51466232009-10-27 23:47:35 +00001565 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1566 /* add VID to filter table */
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08001567 igb_vfta_set(hw, vid, pf_id, true, true);
Alexander Duyck51466232009-10-27 23:47:35 +00001568 adapter->mng_vlan_id = vid;
1569 } else {
1570 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1571 }
1572
1573 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1574 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001575 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001576 /* remove VID from filter table */
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08001577 igb_vfta_set(hw, vid, pf_id, false, true);
Auke Kok9d5c8242008-01-24 02:22:38 -08001578 }
1579}
1580
1581/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001582 * igb_release_hw_control - release control of the h/w to f/w
1583 * @adapter: address of board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001584 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001585 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1586 * For ASF and Pass Through versions of f/w this means that the
1587 * driver is no longer loaded.
Auke Kok9d5c8242008-01-24 02:22:38 -08001588 **/
1589static void igb_release_hw_control(struct igb_adapter *adapter)
1590{
1591 struct e1000_hw *hw = &adapter->hw;
1592 u32 ctrl_ext;
1593
1594 /* Let firmware take over control of h/w */
1595 ctrl_ext = rd32(E1000_CTRL_EXT);
1596 wr32(E1000_CTRL_EXT,
1597 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1598}
1599
Auke Kok9d5c8242008-01-24 02:22:38 -08001600/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001601 * igb_get_hw_control - get control of the h/w from f/w
1602 * @adapter: address of board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001603 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001604 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1605 * For ASF and Pass Through versions of f/w this means that
1606 * the driver is loaded.
Auke Kok9d5c8242008-01-24 02:22:38 -08001607 **/
1608static void igb_get_hw_control(struct igb_adapter *adapter)
1609{
1610 struct e1000_hw *hw = &adapter->hw;
1611 u32 ctrl_ext;
1612
1613 /* Let firmware know the driver has taken over */
1614 ctrl_ext = rd32(E1000_CTRL_EXT);
1615 wr32(E1000_CTRL_EXT,
1616 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1617}
1618
Andre Guedes05f9d3e2017-10-16 18:01:28 -07001619static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1620{
1621 struct net_device *netdev = adapter->netdev;
1622 struct e1000_hw *hw = &adapter->hw;
1623
1624 WARN_ON(hw->mac.type != e1000_i210);
1625
1626 if (enable)
1627 adapter->flags |= IGB_FLAG_FQTSS;
1628 else
1629 adapter->flags &= ~IGB_FLAG_FQTSS;
1630
1631 if (netif_running(netdev))
1632 schedule_work(&adapter->reset_task);
1633}
1634
1635static bool is_fqtss_enabled(struct igb_adapter *adapter)
1636{
1637 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1638}
1639
1640static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1641 enum tx_queue_prio prio)
1642{
1643 u32 val;
1644
1645 WARN_ON(hw->mac.type != e1000_i210);
1646 WARN_ON(queue < 0 || queue > 4);
1647
1648 val = rd32(E1000_I210_TXDCTL(queue));
1649
1650 if (prio == TX_QUEUE_PRIO_HIGH)
1651 val |= E1000_TXDCTL_PRIORITY;
1652 else
1653 val &= ~E1000_TXDCTL_PRIORITY;
1654
1655 wr32(E1000_I210_TXDCTL(queue), val);
1656}
1657
1658static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1659{
1660 u32 val;
1661
1662 WARN_ON(hw->mac.type != e1000_i210);
1663 WARN_ON(queue < 0 || queue > 1);
1664
1665 val = rd32(E1000_I210_TQAVCC(queue));
1666
1667 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1668 val |= E1000_TQAVCC_QUEUEMODE;
1669 else
1670 val &= ~E1000_TQAVCC_QUEUEMODE;
1671
1672 wr32(E1000_I210_TQAVCC(queue), val);
1673}
1674
1675/**
1676 * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
1677 * @adapter: pointer to adapter struct
1678 * @queue: queue number
1679 * @enable: true = enable CBS, false = disable CBS
1680 * @idleslope: idleSlope in kbps
1681 * @sendslope: sendSlope in kbps
1682 * @hicredit: hiCredit in bytes
1683 * @locredit: loCredit in bytes
1684 *
 1685 * Configure CBS for a given hardware queue. When disabling, the idleslope,
 1686 * sendslope, hicredit and locredit arguments are ignored.
1688 **/
1689static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
1690 bool enable, int idleslope, int sendslope,
1691 int hicredit, int locredit)
1692{
1693 struct net_device *netdev = adapter->netdev;
1694 struct e1000_hw *hw = &adapter->hw;
1695 u32 tqavcc;
1696 u16 value;
1697
1698 WARN_ON(hw->mac.type != e1000_i210);
1699 WARN_ON(queue < 0 || queue > 1);
1700
1701 if (enable) {
1702 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1703 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1704
1705 /* According to i210 datasheet section 7.2.7.7, we should set
 1706		 * the 'idleSlope' field of the TQAVCC register following the
1707 * equation:
1708 *
1709 * For 100 Mbps link speed:
1710 *
1711 * value = BW * 0x7735 * 0.2 (E1)
1712 *
1713 * For 1000Mbps link speed:
1714 *
1715 * value = BW * 0x7735 * 2 (E2)
1716 *
1717 * E1 and E2 can be merged into one equation as shown below.
1718 * Note that 'link-speed' is in Mbps.
1719 *
1720 * value = BW * 0x7735 * 2 * link-speed
1721 * -------------- (E3)
1722 * 1000
1723 *
1724 * 'BW' is the percentage bandwidth out of full link speed
1725 * which can be found with the following equation. Note that
1726 * idleSlope here is the parameter from this function which
1727 * is in kbps.
1728 *
1729 * BW = idleSlope
1730 * ----------------- (E4)
1731 * link-speed * 1000
1732 *
1733 * That said, we can come up with a generic equation to
 1734		 * calculate the value we should write to the TQAVCC register by
1735 * replacing 'BW' in E3 by E4. The resulting equation is:
1736 *
1737 * value = idleSlope * 0x7735 * 2 * link-speed
1738 * ----------------- -------------- (E5)
1739 * link-speed * 1000 1000
1740 *
1741 * 'link-speed' is present in both sides of the fraction so
1742 * it is canceled out. The final equation is the following:
1743 *
1744 * value = idleSlope * 61034
1745 * ----------------- (E6)
1746 * 1000000
1747 */
1748 value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
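		/* Worked example (illustrative): an idleslope of 20000 kbps
		 * (a 20 Mbps reservation) gives
		 * value = DIV_ROUND_UP(20000 * 61034, 1000000) = 1221, while
		 * a full 1000000 kbps reservation yields the maximum, 61034.
		 */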
1749
1750 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1751 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1752 tqavcc |= value;
1753 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1754
1755 wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
1756 } else {
1757 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1758 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1759
1760 /* Set idleSlope to zero. */
1761 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1762 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1763 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1764
1765 /* Set hiCredit to zero. */
1766 wr32(E1000_I210_TQAVHC(queue), 0);
1767 }
1768
 1769	/* XXX: On the i210 controller the sendSlope and loCredit parameters
 1770	 * from CBS are not configurable by software, so we don't do any
 1771	 * 'controller configuration' with respect to these parameters.
1772 */
1773
1774 netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1775 (enable) ? "enabled" : "disabled", queue,
1776 idleslope, sendslope, hicredit, locredit);
1777}
1778
1779static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1780 bool enable, int idleslope, int sendslope,
1781 int hicredit, int locredit)
1782{
1783 struct igb_ring *ring;
1784
 1785	if (queue < 0 || queue >= adapter->num_tx_queues)
1786 return -EINVAL;
1787
1788 ring = adapter->tx_ring[queue];
1789
1790 ring->cbs_enable = enable;
1791 ring->idleslope = idleslope;
1792 ring->sendslope = sendslope;
1793 ring->hicredit = hicredit;
1794 ring->locredit = locredit;
1795
1796 return 0;
1797}
1798
1799static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1800{
1801 struct igb_ring *ring;
1802 int i;
1803
1804 for (i = 0; i < adapter->num_tx_queues; i++) {
1805 ring = adapter->tx_ring[i];
1806
1807 if (ring->cbs_enable)
1808 return true;
1809 }
1810
1811 return false;
1812}
1813
1814static void igb_setup_tx_mode(struct igb_adapter *adapter)
1815{
1816 struct net_device *netdev = adapter->netdev;
1817 struct e1000_hw *hw = &adapter->hw;
1818 u32 val;
1819
1820 /* Only i210 controller supports changing the transmission mode. */
1821 if (hw->mac.type != e1000_i210)
1822 return;
1823
1824 if (is_fqtss_enabled(adapter)) {
1825 int i, max_queue;
1826
1827 /* Configure TQAVCTRL register: set transmit mode to 'Qav',
1828 * set data fetch arbitration to 'round robin' and set data
 1829		 * transfer arbitration to 'credit shaper' algorithm.
1830 */
1831 val = rd32(E1000_I210_TQAVCTRL);
1832 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
1833 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1834 wr32(E1000_I210_TQAVCTRL, val);
1835
1836 /* Configure Tx and Rx packet buffers sizes as described in
1837 * i210 datasheet section 7.2.7.7.
1838 */
1839 val = rd32(E1000_TXPBS);
1840 val &= ~I210_TXPBSIZE_MASK;
1841 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1842 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1843 wr32(E1000_TXPBS, val);
1844
1845 val = rd32(E1000_RXPBS);
1846 val &= ~I210_RXPBSIZE_MASK;
1847 val |= I210_RXPBSIZE_PB_32KB;
1848 wr32(E1000_RXPBS, val);
1849
1850 /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
1851 * register should not exceed the buffer size programmed in
1852 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
1853 * so according to the datasheet we should set MAX_TPKT_SIZE to
1854 * 4kB / 64.
1855 *
 1856		 * However, when we do so, no frames from queues 2 and 3 are
 1857		 * transmitted. It seems MAX_TPKT_SIZE must not be greater than
 1858		 * or _equal_ to the buffer size programmed in TXPBS. For this
 1859		 * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
1860 */
1861 val = (4096 - 1) / 64;
1862 wr32(E1000_I210_DTXMXPKTSZ, val);
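		/* With the 64-byte granularity this works out to
		 * (4096 - 1) / 64 = 63, i.e. a 4032-byte cap, just under
		 * the 4KB buffers programmed for queues 2 and 3 above.
		 */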
1863
1864 /* Since FQTSS mode is enabled, apply any CBS configuration
1865 * previously set. If no previous CBS configuration has been
1866 * done, then the initial configuration is applied, which means
1867 * CBS is disabled.
1868 */
1869 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1870 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1871
1872 for (i = 0; i < max_queue; i++) {
1873 struct igb_ring *ring = adapter->tx_ring[i];
1874
1875 igb_configure_cbs(adapter, i, ring->cbs_enable,
1876 ring->idleslope, ring->sendslope,
1877 ring->hicredit, ring->locredit);
1878 }
1879 } else {
1880 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1881 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1882 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1883
1884 val = rd32(E1000_I210_TQAVCTRL);
1885 /* According to Section 8.12.21, the other flags we've set when
 1886		 * enabling FQTSS are not relevant when disabling FQTSS, so we
 1887		 * don't touch them here.
1888 */
1889 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1890 wr32(E1000_I210_TQAVCTRL, val);
1891 }
1892
1893 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1894 "enabled" : "disabled");
1895}
1896
Auke Kok9d5c8242008-01-24 02:22:38 -08001897/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001898 * igb_configure - configure the hardware for RX and TX
1899 * @adapter: private board structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001900 **/
1901static void igb_configure(struct igb_adapter *adapter)
1902{
1903 struct net_device *netdev = adapter->netdev;
1904 int i;
1905
1906 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001907 igb_set_rx_mode(netdev);
Andre Guedes05f9d3e2017-10-16 18:01:28 -07001908 igb_setup_tx_mode(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001909
1910 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001911
Alexander Duyck85b430b2009-10-27 15:50:29 +00001912 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001913 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001914 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001915
Gangfeng Huang0e71def2016-07-06 13:22:54 +08001916 igb_nfc_filter_restore(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001917 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001918 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001919
1920 igb_rx_fifo_flush_82575(&adapter->hw);
1921
Alexander Duyckc493ea42009-03-20 00:16:50 +00001922 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001923 * at least 1 descriptor unused to make sure
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001924 * next_to_use != next_to_clean
1925 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001926 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001927 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001928 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001929 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001930}
1931
Nick Nunley88a268c2010-02-17 01:01:59 +00001932/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001933 * igb_power_up_link - Power up the phy/serdes link
1934 * @adapter: address of board private structure
Nick Nunley88a268c2010-02-17 01:01:59 +00001935 **/
1936void igb_power_up_link(struct igb_adapter *adapter)
1937{
Akeem G. Abodunrin76886592012-07-17 04:51:18 +00001938 igb_reset_phy(&adapter->hw);
1939
Nick Nunley88a268c2010-02-17 01:01:59 +00001940 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1941 igb_power_up_phy_copper(&adapter->hw);
1942 else
1943 igb_power_up_serdes_link_82575(&adapter->hw);
Todd Fujinakaaec653c2014-06-17 06:58:11 +00001944
1945 igb_setup_link(&adapter->hw);
Nick Nunley88a268c2010-02-17 01:01:59 +00001946}
1947
1948/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001949 * igb_power_down_link - Power down the phy/serdes link
1950 * @adapter: address of board private structure
Nick Nunley88a268c2010-02-17 01:01:59 +00001951 */
1952static void igb_power_down_link(struct igb_adapter *adapter)
1953{
1954 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1955 igb_power_down_phy_copper_82575(&adapter->hw);
1956 else
1957 igb_shutdown_serdes_link_82575(&adapter->hw);
1958}
Auke Kok9d5c8242008-01-24 02:22:38 -08001959
1960/**
Carolyn Wyborny56cec242013-10-17 05:36:26 +00001961 * igb_check_swap_media - Detect and switch function for Media Auto Sense
1962 * @adapter: address of the board private structure
1963 **/
1964static void igb_check_swap_media(struct igb_adapter *adapter)
1965{
1966 struct e1000_hw *hw = &adapter->hw;
1967 u32 ctrl_ext, connsw;
1968 bool swap_now = false;
1969
1970 ctrl_ext = rd32(E1000_CTRL_EXT);
1971 connsw = rd32(E1000_CONNSW);
1972
 1973	/* we need to live-swap if the current media is copper and we have
 1974	 * fiber/serdes to switch to.
1975 */
1976
1977 if ((hw->phy.media_type == e1000_media_type_copper) &&
1978 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
1979 swap_now = true;
1980 } else if (!(connsw & E1000_CONNSW_SERDESD)) {
1981 /* copper signal takes time to appear */
1982 if (adapter->copper_tries < 4) {
1983 adapter->copper_tries++;
1984 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
1985 wr32(E1000_CONNSW, connsw);
1986 return;
1987 } else {
1988 adapter->copper_tries = 0;
1989 if ((connsw & E1000_CONNSW_PHYSD) &&
1990 (!(connsw & E1000_CONNSW_PHY_PDN))) {
1991 swap_now = true;
1992 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
1993 wr32(E1000_CONNSW, connsw);
1994 }
1995 }
1996 }
1997
1998 if (!swap_now)
1999 return;
2000
2001 switch (hw->phy.media_type) {
2002 case e1000_media_type_copper:
2003 netdev_info(adapter->netdev,
2004 "MAS: changing media to fiber/serdes\n");
2005 ctrl_ext |=
2006 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2007 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2008 adapter->copper_tries = 0;
2009 break;
2010 case e1000_media_type_internal_serdes:
2011 case e1000_media_type_fiber:
2012 netdev_info(adapter->netdev,
2013 "MAS: changing media to copper\n");
2014 ctrl_ext &=
2015 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2016 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2017 break;
2018 default:
2019 /* shouldn't get here during regular operation */
2020 netdev_err(adapter->netdev,
2021 "AMS: Invalid media type found, returning\n");
2022 break;
2023 }
2024 wr32(E1000_CTRL_EXT, ctrl_ext);
2025}
2026
2027/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002028 * igb_up - Open the interface and prepare it to handle traffic
2029 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002030 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08002031int igb_up(struct igb_adapter *adapter)
2032{
2033 struct e1000_hw *hw = &adapter->hw;
2034 int i;
2035
2036 /* hardware has been reset, we need to reload some things */
2037 igb_configure(adapter);
2038
2039 clear_bit(__IGB_DOWN, &adapter->state);
2040
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002041 for (i = 0; i < adapter->num_q_vectors; i++)
2042 napi_enable(&(adapter->q_vector[i]->napi));
2043
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00002044 if (adapter->flags & IGB_FLAG_HAS_MSIX)
Auke Kok9d5c8242008-01-24 02:22:38 -08002045 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00002046 else
2047 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002048
2049 /* Clear any pending interrupts. */
2050 rd32(E1000_ICR);
2051 igb_irq_enable(adapter);
2052
Alexander Duyckd4960302009-10-27 15:53:45 +00002053 /* notify VFs that reset has been completed */
2054 if (adapter->vfs_allocated_count) {
2055 u32 reg_data = rd32(E1000_CTRL_EXT);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00002056
Alexander Duyckd4960302009-10-27 15:53:45 +00002057 reg_data |= E1000_CTRL_EXT_PFRSTD;
2058 wr32(E1000_CTRL_EXT, reg_data);
2059 }
2060
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00002061 netif_tx_start_all_queues(adapter->netdev);
2062
Alexander Duyck25568a52009-10-27 23:49:59 +00002063 /* start the watchdog. */
2064 hw->mac.get_link_status = 1;
2065 schedule_work(&adapter->watchdog_task);
2066
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002067 if ((adapter->flags & IGB_FLAG_EEE) &&
2068 (!hw->dev_spec._82575.eee_disable))
2069 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2070
Auke Kok9d5c8242008-01-24 02:22:38 -08002071 return 0;
2072}
2073
2074void igb_down(struct igb_adapter *adapter)
2075{
Auke Kok9d5c8242008-01-24 02:22:38 -08002076 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00002077 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002078 u32 tctl, rctl;
2079 int i;
2080
2081 /* signal that we're down so the interrupt handler does not
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002082 * reschedule our watchdog timer
2083 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002084 set_bit(__IGB_DOWN, &adapter->state);
2085
2086 /* disable receives in the hardware */
2087 rctl = rd32(E1000_RCTL);
2088 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2089 /* flush and sleep below */
2090
Gangfeng Huang94221ae752017-05-27 09:17:53 +08002091 igb_nfc_filter_exit(adapter);
2092
Todd Fujinakaf28ea082015-03-20 17:41:53 -07002093 netif_carrier_off(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002094 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002095
2096 /* disable transmits in the hardware */
2097 tctl = rd32(E1000_TCTL);
2098 tctl &= ~E1000_TCTL_EN;
2099 wr32(E1000_TCTL, tctl);
2100 /* flush both disables and wait for them to finish */
2101 wrfl();
Carolyn Wyborny0d451e72014-04-11 01:46:40 +00002102 usleep_range(10000, 11000);
Auke Kok9d5c8242008-01-24 02:22:38 -08002103
Auke Kok9d5c8242008-01-24 02:22:38 -08002104 igb_irq_disable(adapter);
2105
Akeem G Abodunrinaa9b8cc2013-08-28 02:22:43 +00002106 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2107
Carolyn Wyborny41f149a2013-04-30 00:21:32 +00002108 for (i = 0; i < adapter->num_q_vectors; i++) {
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08002109 if (adapter->q_vector[i]) {
2110 napi_synchronize(&adapter->q_vector[i]->napi);
2111 napi_disable(&adapter->q_vector[i]->napi);
2112 }
Carolyn Wyborny41f149a2013-04-30 00:21:32 +00002113 }
2114
Auke Kok9d5c8242008-01-24 02:22:38 -08002115 del_timer_sync(&adapter->watchdog_timer);
2116 del_timer_sync(&adapter->phy_info_timer);
2117
Alexander Duyck04fe6352009-02-06 23:22:32 +00002118 /* record the stats before reset*/
Eric Dumazet12dcd862010-10-15 17:27:10 +00002119 spin_lock(&adapter->stats64_lock);
Benjamin Poirier81e3f642017-05-16 15:55:16 -07002120 igb_update_stats(adapter);
Eric Dumazet12dcd862010-10-15 17:27:10 +00002121 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00002122
Auke Kok9d5c8242008-01-24 02:22:38 -08002123 adapter->link_speed = 0;
2124 adapter->link_duplex = 0;
2125
Jeff Kirsher30236822008-06-24 17:01:15 -07002126 if (!pci_channel_offline(adapter->pdev))
2127 igb_reset(adapter);
Alexander Duyck16903ca2016-01-06 23:11:18 -08002128
2129 /* clear VLAN promisc flag so VFTA will be updated if necessary */
2130 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2131
Auke Kok9d5c8242008-01-24 02:22:38 -08002132 igb_clean_all_tx_rings(adapter);
2133 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00002134#ifdef CONFIG_IGB_DCA
2135
2136 /* since we reset the hardware DCA settings were cleared */
2137 igb_setup_dca(adapter);
2138#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08002139}
2140
2141void igb_reinit_locked(struct igb_adapter *adapter)
2142{
2143 WARN_ON(in_interrupt());
2144 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
Carolyn Wyborny0d451e72014-04-11 01:46:40 +00002145 usleep_range(1000, 2000);
Auke Kok9d5c8242008-01-24 02:22:38 -08002146 igb_down(adapter);
2147 igb_up(adapter);
2148 clear_bit(__IGB_RESETTING, &adapter->state);
2149}
2150
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002151/**
 2152 * igb_enable_mas - Media Autosense re-enable after swap
 2153 * @adapter: adapter struct
 2154 **/
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002155static void igb_enable_mas(struct igb_adapter *adapter)
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002156{
2157 struct e1000_hw *hw = &adapter->hw;
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002158 u32 connsw = rd32(E1000_CONNSW);
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002159
2160 /* configure for SerDes media detect */
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002161 if ((hw->phy.media_type == e1000_media_type_copper) &&
2162 (!(connsw & E1000_CONNSW_SERDESD))) {
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002163 connsw |= E1000_CONNSW_ENRGSRC;
2164 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2165 wr32(E1000_CONNSW, connsw);
2166 wrfl();
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002167 }
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002168}
2169
Auke Kok9d5c8242008-01-24 02:22:38 -08002170void igb_reset(struct igb_adapter *adapter)
2171{
Alexander Duyck090b1792009-10-27 23:51:55 +00002172 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002173 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07002174 struct e1000_mac_info *mac = &hw->mac;
2175 struct e1000_fc_info *fc = &hw->fc;
Alexander Duyck45693bc2016-01-06 23:10:39 -08002176 u32 pba, hwm;
Auke Kok9d5c8242008-01-24 02:22:38 -08002177
 2178	/* Repartition PBA for MTUs greater than 9K.
 2179	 * CTRL.RST is required for the change to take effect.
2180 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002181 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002182 case e1000_i350:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00002183 case e1000_i354:
Alexander Duyck55cac242009-11-19 12:42:21 +00002184 case e1000_82580:
2185 pba = rd32(E1000_RXPBS);
2186 pba = igb_rxpbs_adjust_82580(pba);
2187 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002188 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00002189 pba = rd32(E1000_RXPBS);
2190 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002191 break;
2192 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002193 case e1000_i210:
2194 case e1000_i211:
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002195 default:
2196 pba = E1000_PBA_34K;
2197 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07002198 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002199
Alexander Duyck45693bc2016-01-06 23:10:39 -08002200 if (mac->type == e1000_82575) {
2201 u32 min_rx_space, min_tx_space, needed_tx_space;
2202
2203 /* write Rx PBA so that hardware can report correct Tx PBA */
Auke Kok9d5c8242008-01-24 02:22:38 -08002204 wr32(E1000_PBA, pba);
2205
2206 /* To maintain wire speed transmits, the Tx FIFO should be
2207 * large enough to accommodate two full transmit packets,
2208 * rounded up to the next 1KB and expressed in KB. Likewise,
2209 * the Rx FIFO should be large enough to accommodate at least
2210 * one full receive packet and is similarly rounded up and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002211 * expressed in KB.
2212 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002213 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2214
 2215		/* The Tx FIFO also stores 16 bytes of information about the Tx
 2216		 * packet but doesn't include the Ethernet FCS, as hardware appends it.
2217 * We only need to round down to the nearest 512 byte block
2218 * count since the value we care about is 2 frames, not 1.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002219 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002220 min_tx_space = adapter->max_frame_size;
2221 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2222 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2223
2224 /* upper 16 bits has Tx packet buffer allocation size in KB */
2225 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
Auke Kok9d5c8242008-01-24 02:22:38 -08002226
2227 /* If current Tx allocation is less than the min Tx FIFO size,
2228 * and the min Tx FIFO size is less than the current Rx FIFO
Alexander Duyck45693bc2016-01-06 23:10:39 -08002229 * allocation, take space away from current Rx allocation.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002230 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002231 if (needed_tx_space < pba) {
2232 pba -= needed_tx_space;
Auke Kok9d5c8242008-01-24 02:22:38 -08002233
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002234 /* if short on Rx space, Rx wins and must trump Tx
2235 * adjustment
2236 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002237 if (pba < min_rx_space)
2238 pba = min_rx_space;
2239 }
Alexander Duyck45693bc2016-01-06 23:10:39 -08002240
2241 /* adjust PBA for jumbo frames */
Alexander Duyck2d064c02008-07-08 15:10:12 -07002242 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08002243 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002244
Alexander Duyck45693bc2016-01-06 23:10:39 -08002245 /* flow control settings
2246 * The high water mark must be low enough to fit one full frame
2247 * after transmitting the pause frame. As such we must have enough
2248 * space to allow for us to complete our current transmit and then
2249 * receive the frame that is in progress from the link partner.
2250 * Set it to:
2251 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002252 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002253 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002254
Matthew Vickd48507f2012-11-08 04:03:58 +00002255 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
Alexander Duyckd405ea32009-12-23 13:21:27 +00002256 fc->low_water = fc->high_water - 16;
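	/* Rough example (values assumed, 82575 with pba = 34KB and a
	 * 1522-byte max frame): hwm = 34 * 1024 - (1522 + 16128) = 17166,
	 * so high_water = 0x4300 after masking and low_water = 0x42F0.
	 */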
Auke Kok9d5c8242008-01-24 02:22:38 -08002257 fc->pause_time = 0xFFFF;
2258 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00002259 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08002260
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002261 /* disable receive for all VFs and wait one second */
2262 if (adapter->vfs_allocated_count) {
2263 int i;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00002264
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002265 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00002266 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002267
2268 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00002269 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002270
2271 /* disable transmits and receives */
2272 wr32(E1000_VFRE, 0);
2273 wr32(E1000_VFTE, 0);
2274 }
2275
Auke Kok9d5c8242008-01-24 02:22:38 -08002276 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00002277 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002278 wr32(E1000_WUC, 0);
2279
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002280 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2281 /* need to resetup here after media swap */
2282 adapter->ei.get_invariants(hw);
2283 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2284 }
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002285 if ((mac->type == e1000_82575) &&
2286 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2287 igb_enable_mas(adapter);
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002288 }
Alexander Duyck330a6d62009-10-27 23:51:35 +00002289 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00002290 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002291
Yury Kylulin83c21332017-03-07 11:20:25 +03002292 /* RAR registers were cleared during init_hw, clear mac table */
2293 igb_flush_mac_table(adapter);
2294 __dev_uc_unsync(adapter->netdev, NULL);
2295
2296 /* Recover default RAR entry */
2297 igb_set_default_mac_filter(adapter);
2298
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002299 /* Flow control settings reset on hardware reset, so guarantee flow
Matthew Vicka27416b2012-04-18 02:57:44 +00002300 * control is off when forcing speed.
2301 */
2302 if (!hw->mac.autoneg)
2303 igb_force_mac_fc(hw);
2304
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00002305 igb_init_dmac(adapter, pba);
Carolyn Wybornye4288932012-12-07 03:01:42 +00002306#ifdef CONFIG_IGB_HWMON
2307 /* Re-initialize the thermal sensor on i350 devices. */
2308 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2309 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2310 /* If present, re-initialize the external thermal sensor
2311 * interface.
2312 */
2313 if (adapter->ets)
2314 mac->ops.init_thermal_sensor_thresh(hw);
2315 }
2316 }
2317#endif
Jeff Kirsherb9361362014-03-13 16:07:14 -07002318 /* Re-establish EEE setting */
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002319 if (hw->phy.media_type == e1000_media_type_copper) {
2320 switch (mac->type) {
2321 case e1000_i350:
2322 case e1000_i210:
2323 case e1000_i211:
Todd Fujinakac4c112f2014-08-29 06:43:13 +00002324 igb_set_eee_i350(hw, true, true);
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002325 break;
2326 case e1000_i354:
Todd Fujinakac4c112f2014-08-29 06:43:13 +00002327 igb_set_eee_i354(hw, true, true);
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002328 break;
2329 default:
2330 break;
2331 }
2332 }
Nick Nunley88a268c2010-02-17 01:01:59 +00002333 if (!netif_running(adapter->netdev))
2334 igb_power_down_link(adapter);
2335
Auke Kok9d5c8242008-01-24 02:22:38 -08002336 igb_update_mng_vlan(adapter);
2337
2338 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2339 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2340
Matthew Vick1f6e8172012-08-18 07:26:33 +00002341 /* Re-enable PTP, where applicable. */
Jacob Keller4f3ce712016-05-24 13:56:29 -07002342 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2343 igb_ptp_reset(adapter);
Matthew Vick1f6e8172012-08-18 07:26:33 +00002344
Alexander Duyck330a6d62009-10-27 23:51:35 +00002345 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002346}
2347
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002348static netdev_features_t igb_fix_features(struct net_device *netdev,
2349 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002350{
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002351 /* Since there is no support for separate Rx/Tx vlan accel
 2352	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002353 */
Patrick McHardyf6469682013-04-19 02:04:27 +00002354 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2355 features |= NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002356 else
Patrick McHardyf6469682013-04-19 02:04:27 +00002357 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002358
2359 return features;
2360}
2361
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002362static int igb_set_features(struct net_device *netdev,
2363 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00002364{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002365 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00002366 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00002367
Patrick McHardyf6469682013-04-19 02:04:27 +00002368 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002369 igb_vlan_mode(netdev, features);
2370
Alexander Duyck16903ca2016-01-06 23:11:18 -08002371 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
Ben Greear89eaefb2012-03-06 09:41:58 +00002372 return 0;
2373
Gangfeng Huang0e71def2016-07-06 13:22:54 +08002374 if (!(features & NETIF_F_NTUPLE)) {
2375 struct hlist_node *node2;
2376 struct igb_nfc_filter *rule;
2377
2378 spin_lock(&adapter->nfc_lock);
2379 hlist_for_each_entry_safe(rule, node2,
2380 &adapter->nfc_filter_list, nfc_node) {
2381 igb_erase_filter(adapter, rule);
2382 hlist_del(&rule->nfc_node);
2383 kfree(rule);
2384 }
2385 spin_unlock(&adapter->nfc_lock);
2386 adapter->nfc_filter_count = 0;
2387 }
2388
Ben Greear89eaefb2012-03-06 09:41:58 +00002389 netdev->features = features;
2390
2391 if (netif_running(netdev))
2392 igb_reinit_locked(adapter);
2393 else
2394 igb_reset(adapter);
2395
Michał Mirosławac52caa2011-06-08 08:38:01 +00002396 return 0;
2397}
2398
Alexander Duyck268f9d32016-01-06 23:11:34 -08002399static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2400 struct net_device *dev,
2401 const unsigned char *addr, u16 vid,
2402 u16 flags)
2403{
2404 /* guarantee we can provide a unique filter for the unicast address */
2405 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2406 struct igb_adapter *adapter = netdev_priv(dev);
Alexander Duyck268f9d32016-01-06 23:11:34 -08002407 int vfn = adapter->vfs_allocated_count;
Alexander Duyck268f9d32016-01-06 23:11:34 -08002408
Yury Kylulin83c21332017-03-07 11:20:25 +03002409 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
Alexander Duyck268f9d32016-01-06 23:11:34 -08002410 return -ENOMEM;
2411 }
2412
2413 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2414}
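
/* Sketch of how this hook is reached from user space (device name and
 * address are assumed, not prescriptive):
 *
 *   bridge fdb add 00:11:22:33:44:55 dev eth0
 *
 * Unicast and link-local entries consume a RAR slot, hence the capacity
 * check above; other addresses fall through to ndo_dflt_fdb_add().
 */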
2415
Alexander Duycke10715d2016-04-14 17:19:38 -04002416#define IGB_MAX_MAC_HDR_LEN 127
2417#define IGB_MAX_NETWORK_HDR_LEN 511
2418
2419static netdev_features_t
2420igb_features_check(struct sk_buff *skb, struct net_device *dev,
2421 netdev_features_t features)
2422{
2423 unsigned int network_hdr_len, mac_hdr_len;
2424
2425 /* Make certain the headers can be described by a context descriptor */
2426 mac_hdr_len = skb_network_header(skb) - skb->data;
2427 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2428 return features & ~(NETIF_F_HW_CSUM |
2429 NETIF_F_SCTP_CRC |
2430 NETIF_F_HW_VLAN_CTAG_TX |
2431 NETIF_F_TSO |
2432 NETIF_F_TSO6);
2433
2434 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2435 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2436 return features & ~(NETIF_F_HW_CSUM |
2437 NETIF_F_SCTP_CRC |
2438 NETIF_F_TSO |
2439 NETIF_F_TSO6);
2440
2441 /* We can only support IPV4 TSO in tunnels if we can mangle the
2442 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2443 */
2444 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2445 features &= ~NETIF_F_TSO;
2446
2447 return features;
2448}
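
/* Illustrative case: a frame whose outer L2 header is Ethernet (14 bytes)
 * plus one VLAN tag has mac_hdr_len = 18, far below the 127-byte cap, so
 * nothing is stripped; only unusually deep header stacks fall back to
 * software checksumming and GSO here.
 */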
2449
Andre Guedes05f9d3e2017-10-16 18:01:28 -07002450static int igb_offload_cbs(struct igb_adapter *adapter,
2451 struct tc_cbs_qopt_offload *qopt)
2452{
2453 struct e1000_hw *hw = &adapter->hw;
2454 int err;
2455
2456 /* CBS offloading is only supported by i210 controller. */
2457 if (hw->mac.type != e1000_i210)
2458 return -EOPNOTSUPP;
2459
2460 /* CBS offloading is only supported by queue 0 and queue 1. */
2461 if (qopt->queue < 0 || qopt->queue > 1)
2462 return -EINVAL;
2463
2464 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2465 qopt->idleslope, qopt->sendslope,
2466 qopt->hicredit, qopt->locredit);
2467 if (err)
2468 return err;
2469
2470 if (is_fqtss_enabled(adapter)) {
2471 igb_configure_cbs(adapter, qopt->queue, qopt->enable,
2472 qopt->idleslope, qopt->sendslope,
2473 qopt->hicredit, qopt->locredit);
2474
2475 if (!is_any_cbs_enabled(adapter))
2476 enable_fqtss(adapter, false);
2477
2478 } else {
2479 enable_fqtss(adapter, true);
2480 }
2481
2482 return 0;
2483}
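
/* This path is driven from user space through the CBS qdisc. A usage
 * sketch (interface name and numbers are assumed, not prescriptive):
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs \
 *      idleslope 20000 sendslope -980000 hicredit 30 locredit -1470 \
 *      offload 1
 *
 * which arrives here as a tc_cbs_qopt_offload for the chosen Tx queue.
 */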

static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct igb_adapter *adapter = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return igb_offload_cbs(adapter, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
	.ndo_fdb_add		= igb_ndo_fdb_add,
	.ndo_features_check	= igb_features_check,
	.ndo_setup_tc		= igb_setup_tc,
};

/**
 *  igb_set_fw_version - Configure version string for ethtool
 *  @adapter: adapter struct
 **/
void igb_set_fw_version(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_fw_version fw;

	igb_get_fw_version(hw, &fw);

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (!(igb_get_flash_presence_i210(hw))) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%2d.%2d-%d",
				 fw.invm_major, fw.invm_minor,
				 fw.invm_img_type);
			break;
		}
		/* fall through */
	default:
		/* if option rom is valid, display its version too */
		if (fw.or_valid) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x, %d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.etrack_id,
				 fw.or_major, fw.or_build, fw.or_patch);
		/* no option rom */
		} else if (fw.etrack_id != 0x0000) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor, fw.etrack_id);
		} else {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.eep_build);
		}
		break;
	}
}
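
/* Illustrative results (numbers invented): the switch above produces
 * strings such as "1.63, 0x80000d05, 1.949.0" when an option ROM is
 * present, "1.63, 0x80000d05" when only an eTrack ID exists, a bare
 * "1.63.0" EEPROM version otherwise, and the "%2d.%2d-%d" iNVM form on
 * flashless i210/i211 parts.
 */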

/**
 *  igb_init_mas - init Media Autosense feature if enabled in the NVM
 *
 *  @adapter: adapter struct
 **/
static void igb_init_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 eeprom_data;

	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
	switch (hw->bus.func) {
	case E1000_FUNC_0:
		if (eeprom_data & IGB_MAS_ENABLE_0) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_1:
		if (eeprom_data & IGB_MAS_ENABLE_1) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_2:
		if (eeprom_data & IGB_MAS_ENABLE_2) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_3:
		if (eeprom_data & IGB_MAS_ENABLE_3) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	default:
		/* Shouldn't get here */
		netdev_err(adapter->netdev,
			   "MAS: Invalid port configuration, returning\n");
		break;
	}
}

/**
 *  igb_init_i2c - Init I2C interface
 *  @adapter: pointer to adapter structure
 **/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
	s32 status = 0;

	/* I2C interface supported on i350 devices */
	if (adapter->hw.mac.type != e1000_i350)
		return 0;

	/* Initialize the i2c bus which is controlled by the registers.
	 * This bus will use the i2c_algo_bit structure that implements
	 * the protocol through toggling of the 4 bits in the register.
	 */
	adapter->i2c_adap.owner = THIS_MODULE;
	adapter->i2c_algo = igb_i2c_algo;
	adapter->i2c_algo.data = adapter;
	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
	strlcpy(adapter->i2c_adap.name, "igb BB",
		sizeof(adapter->i2c_adap.name));
	status = i2c_bit_add_bus(&adapter->i2c_adap);
	return status;
}

/**
 *  igb_probe - Device Initialization Routine
 *  @pdev: PCI device information struct
 *  @ent: entry in igb_pci_tbl
 *
 *  Returns 0 on success, negative on failure
 *
 *  igb_probe initializes an adapter identified by a pci_dev structure.
 *  The OS initialization, configuring of the adapter private structure,
 *  and a hardware reset occur.
 **/
static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = -EIO;
	adapter->io_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->io_addr)
		goto err_ioremap;
	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/* features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM;

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CRC;

#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_RXALL;

	if (hw->mac.type >= e1000_i350)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good; i211/i210 parts can have special NVM
	 * that doesn't contain a checksum
	 */
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (igb_get_flash_presence_i210(hw)) {
			if (hw->nvm.ops.validate(hw) < 0) {
				dev_err(&pdev->dev,
					"The NVM Checksum Is Not Valid\n");
				err = -EIO;
				goto err_eeprom;
			}
		}
		break;
	default:
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
		break;
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	igb_set_default_mac_filter(adapter);

	/* get firmware version for ethtool -i */
	igb_set_fw_version(adapter);

	/* configure RXPBSIZE and TXPBSIZE */
	if (hw->mac.type == e1000_i210) {
		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
	}

	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* By default, support wake on port A */
	if (hw->bus.func == 0)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* Check the NVM for wake support on non-port A ports */
	if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & IGB_EEPROM_APME)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	default:
		/* If the device can't wake, don't set software support */
		if (!device_can_wakeup(&adapter->pdev->dev))
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
	}

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
		adapter->wol |= E1000_WUFC_MAG;

	/* Some vendors want WoL disabled by default, but still supported */
	if ((hw->mac.type == e1000_i350) &&
	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
		adapter->wol = 0;
	}

	/* Some vendors want the ability to use the EEPROM setting as
	 * enable/disable only, and not for capability
	 */
	if (((hw->mac.type == e1000_i350) ||
	     (hw->mac.type == e1000_i354)) &&
	    (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
		adapter->wol = 0;
	}
	if (hw->mac.type == e1000_i350) {
		if (((pdev->subsystem_device == 0x5001) ||
		     (pdev->subsystem_device == 0x5002)) &&
		    (hw->bus.func == 0)) {
			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
			adapter->wol = 0;
		}
		if (pdev->subsystem_device == 0x1F52)
			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
	}

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* Init the I2C interface */
	err = igb_init_i2c(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to init i2c interface\n");
		goto err_eeprom;
	}

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
#ifdef CONFIG_IGB_HWMON
	/* Initialize the thermal sensor on i350 devices. */
	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
		u16 ets_word;

		/* Read the NVM to determine if this i350 device supports an
		 * external thermal sensor.
		 */
		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
		if (ets_word != 0x0000 && ets_word != 0xFFFF)
			adapter->ets = true;
		else
			adapter->ets = false;
		if (igb_sysfs_init(adapter))
			dev_err(&pdev->dev,
				"failed to allocate sysfs resources\n");
	} else {
		adapter->ets = false;
	}
#endif
	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;
	if (hw->dev_spec._82575.mas_capable)
		igb_init_mas(adapter);

	/* do hw tstamp init after resetting */
	igb_ptp_init(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info, not applicable to i354 */
	if (hw->mac.type != e1000_i354) {
		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
			 netdev->name,
			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
			  "unknown"),
			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
			  "Width x4" :
			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
			  "Width x2" :
			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
			  "Width x1" : "unknown"), netdev->dev_addr);
	}

	if ((hw->mac.type >= e1000_i210 ||
	     igb_get_flash_presence_i210(hw))) {
		ret_val = igb_read_part_string(hw, part_str,
					       E1000_PBANUM_LENGTH);
	} else {
		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	}

	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			/* Enable EEE for internal copper PHY devices */
			err = igb_set_eee_i350(hw, true, true);
			if ((!err) &&
			    (!hw->dev_spec._82575.eee_disable)) {
				adapter->eee_advert =
					MDIO_EEE_100TX | MDIO_EEE_1000T;
				adapter->flags |= IGB_FLAG_EEE;
			}
			break;
		case e1000_i354:
			if ((rd32(E1000_CTRL_EXT) &
			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
				err = igb_set_eee_i354(hw, true, true);
				if ((!err) &&
				    (!hw->dev_spec._82575.eee_disable)) {
					adapter->eee_advert =
						MDIO_EEE_100TX | MDIO_EEE_1000T;
					adapter->flags |= IGB_FLAG_EEE;
				}
			}
			break;
		default:
			break;
		}
	}
	pm_runtime_put_noidle(&pdev->dev);
	return 0;

err_register:
	igb_release_hw_control(adapter);
	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	kfree(adapter->mac_table);
	kfree(adapter->shadow_vfta);
	igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
	igb_disable_sriov(pdev);
#endif
	pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

#ifdef CONFIG_PCI_IOV
static int igb_disable_sriov(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (pci_vfs_assigned(pdev)) {
			dev_warn(&pdev->dev,
				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
			return -EPERM;
		} else {
			pci_disable_sriov(pdev);
			msleep(500);
		}

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		adapter->vfs_allocated_count = 0;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");

		/* Re-enable DMA Coalescing flag since IOV is turned off */
		adapter->flags |= IGB_FLAG_DMAC;
	}

	return 0;
}

static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	int old_vfs = pci_num_vf(pdev);
	struct vf_mac_filter *mac_list;
	int err = 0;
	int num_vf_mac_filters, i;

	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
		err = -EPERM;
		goto out;
	}
	if (!num_vfs)
		goto out;

	if (old_vfs) {
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
			 old_vfs, max_vfs);
		adapter->vfs_allocated_count = old_vfs;
	} else
		adapter->vfs_allocated_count = num_vfs;

	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
				   sizeof(struct vf_data_storage), GFP_KERNEL);

	/* if allocation failed then we do not support SR-IOV */
	if (!adapter->vf_data) {
		adapter->vfs_allocated_count = 0;
		dev_err(&pdev->dev,
			"Unable to allocate memory for VF Data Storage\n");
		err = -ENOMEM;
		goto out;
	}

	/* Due to the limited number of RAR entries calculate potential
	 * number of MAC filters available for the VFs. Reserve entries
	 * for PF default MAC, PF MAC filters and at least one RAR entry
	 * for each VF for VF MAC.
	 */
	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
			      adapter->vfs_allocated_count);

	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
				       sizeof(struct vf_mac_filter),
				       GFP_KERNEL);

	mac_list = adapter->vf_mac_list;
	INIT_LIST_HEAD(&adapter->vf_macs.l);

	if (adapter->vf_mac_list) {
		/* Initialize list of VF MAC filters */
		for (i = 0; i < num_vf_mac_filters; i++) {
			mac_list->vf = -1;
			mac_list->free = true;
			list_add(&mac_list->l, &adapter->vf_macs.l);
			mac_list++;
		}
	} else {
		/* If we could not allocate memory for the VF MAC filters
		 * we can continue without this feature but warn user.
		 */
		dev_err(&pdev->dev,
			"Unable to allocate memory for VF MAC filter list\n");
	}

	/* only call pci_enable_sriov() if no VFs are allocated already */
	if (!old_vfs) {
		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
		if (err)
			goto err_out;
	}
	dev_info(&pdev->dev, "%d VFs allocated\n",
		 adapter->vfs_allocated_count);
	for (i = 0; i < adapter->vfs_allocated_count; i++)
		igb_vf_configure(adapter, i);

	/* DMA Coalescing is not supported in IOV mode. */
	adapter->flags &= ~IGB_FLAG_DMAC;
	goto out;

err_out:
	kfree(adapter->vf_mac_list);
	adapter->vf_mac_list = NULL;
	kfree(adapter->vf_data);
	adapter->vf_data = NULL;
	adapter->vfs_allocated_count = 0;
out:
	return err;
}
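
/* Illustrative only: VFs are normally created and destroyed through the
 * standard PCI sysfs interface rather than the deprecated max_vfs module
 * parameter, e.g. (the device address is an example)
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * which lands in the driver's ->sriov_configure() callback and from
 * there in the two functions above.
 */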

#endif
/**
 *  igb_remove_i2c - Cleanup I2C interface
 *  @adapter: pointer to adapter structure
 **/
static void igb_remove_i2c(struct igb_adapter *adapter)
{
	/* free the adapter bus structure */
	i2c_del_adapter(&adapter->i2c_adap);
}

/**
 *  igb_remove - Device Removal Routine
 *  @pdev: PCI device information struct
 *
 *  igb_remove is called by the PCI subsystem to alert the driver
 *  that it should release a PCI device. This could be caused by a
 *  Hot-Plug event, or because the driver is going to be removed from
 *  memory.
 **/
static void igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_HWMON
	igb_sysfs_exit(adapter);
#endif
	igb_remove_i2c(adapter);
	igb_ptp_stop(adapter);
	/* The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

#ifdef CONFIG_PCI_IOV
	igb_disable_sriov(pdev);
#endif

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

	pci_iounmap(pdev, adapter->io_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_mem_regions(pdev);

	kfree(adapter->mac_table);
	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 *  @adapter: board private structure to initialize
 *
 *  This function initializes the vf specific data storage and then attempts to
 *  allocate the VFs. The reason for ordering it this way is because it is much
 *  more expensive time-wise to disable SR-IOV than it is to allocate and free
 *  the memory for the VFs.
 **/
static void igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Virtualization features not supported on i210 family. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
		return;

	/* Of the below we really only want the effect of getting
	 * IGB_FLAG_HAS_MSIX set (if available), without which
	 * igb_enable_sriov() has no effect.
	 */
	igb_set_interrupt_capability(adapter, true);
	igb_reset_interrupt_capability(adapter);

	pci_sriov_set_totalvfs(pdev, 7);
	igb_enable_sriov(pdev, max_vfs);

#endif /* CONFIG_PCI_IOV */
}
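
/* Background note (an assumption from the 82576/i350 virtualization
 * model, not stated in this file): the hardware provides eight queue
 * pools per port and one stays with the PF, which is why
 * pci_sriov_set_totalvfs() above and the num_vfs > 7 check in
 * igb_enable_sriov() both cap the VF count at seven.
 */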

unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned int max_rss_queues;

	/* Determine the maximum number of RSS queues supported. */
	switch (hw->mac.type) {
	case e1000_i211:
		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
		break;
	case e1000_82575:
	case e1000_i210:
		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
		break;
	case e1000_i350:
		/* I350 cannot do RSS and SR-IOV at the same time */
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 1;
			break;
		}
		/* fall through */
	case e1000_82576:
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 2;
			break;
		}
		/* fall through */
	case e1000_82580:
	case e1000_i354:
	default:
		max_rss_queues = IGB_MAX_RX_QUEUES;
		break;
	}

	return max_rss_queues;
}

static void igb_init_queue_configuration(struct igb_adapter *adapter)
{
	u32 max_rss_queues;

	max_rss_queues = igb_get_max_rss_queues(adapter);
	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	igb_set_flag_queue_pairs(adapter, max_rss_queues);
}

void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
			      const u32 max_rss_queues)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Determine if we need to pair queues. */
	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i211:
		/* Device supports enough interrupts without queue pairing. */
		break;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	default:
		/* If rss_queues > half of max_rss_queues, pair the queues in
		 * order to conserve interrupts due to limited supply.
		 */
		if (adapter->rss_queues > (max_rss_queues / 2))
			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
		else
			adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
		break;
	}
}
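
/* Worked example (illustrative): an 82576 with no VFs reports
 * IGB_MAX_RX_QUEUES (8) as max_rss_queues. On a 6-core system,
 * rss_queues = min(8, 6) = 6, and since 6 > 8 / 2 the Tx and Rx rings
 * are paired onto shared interrupt vectors via IGB_FLAG_QUEUE_PAIRS.
 */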

/**
 *  igb_sw_init - Initialize general software structures (struct igb_adapter)
 *  @adapter: board private structure to initialize
 *
 *  igb_sw_init initializes the Adapter private data structure.
 *  Fields are initialized based on PCI device information and
 *  OS network device settings (MTU size).
 **/
static int igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
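
	/* Worked example (illustrative): with the default 1500 byte MTU,
	 * max_frame_size = 1500 + 14 (Ethernet header) + 4 (FCS) +
	 * 4 (VLAN tag) = 1522 bytes, and min_frame_size = 60 + 4 = 64.
	 */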
3481
Gangfeng Huang0e71def2016-07-06 13:22:54 +08003482 spin_lock_init(&adapter->nfc_lock);
Greg Rosefa44f2f2013-01-17 01:03:06 -08003483 spin_lock_init(&adapter->stats64_lock);
3484#ifdef CONFIG_PCI_IOV
3485 switch (hw->mac.type) {
3486 case e1000_82576:
3487 case e1000_i350:
3488 if (max_vfs > 7) {
3489 dev_warn(&pdev->dev,
3490 "Maximum of 7 VFs per PF, using max\n");
Alex Williamsond0f63ac2013-03-13 15:50:24 +00003491 max_vfs = adapter->vfs_allocated_count = 7;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003492 } else
3493 adapter->vfs_allocated_count = max_vfs;
3494 if (adapter->vfs_allocated_count)
3495 dev_warn(&pdev->dev,
3496 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3497 break;
3498 default:
3499 break;
3500 }
3501#endif /* CONFIG_PCI_IOV */
3502
Stefan Assmanncbfe3602015-09-17 14:46:10 +02003503 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
3504 adapter->flags |= IGB_FLAG_HAS_MSIX;
3505
Yury Kylulin83c21332017-03-07 11:20:25 +03003506 adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
3507 hw->mac.rar_entry_count, GFP_ATOMIC);
3508 if (!adapter->mac_table)
3509 return -ENOMEM;
3510
Todd Fujinakaceee3452015-08-07 17:27:39 -07003511 igb_probe_vfs(adapter);
3512
Greg Rosefa44f2f2013-01-17 01:03:06 -08003513 igb_init_queue_configuration(adapter);
Alexander Duycka99955f2009-11-12 18:37:19 +00003514
Carolyn Wyborny1128c752011-10-14 00:13:49 +00003515 /* Setup and initialize a copy of the hw vlan table array */
Joe Perchesb2adaca2013-02-03 17:43:58 +00003516 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3517 GFP_ATOMIC);
Christophe JAILLET18eb8632017-08-27 08:39:51 +02003518 if (!adapter->shadow_vfta)
3519 return -ENOMEM;
Carolyn Wyborny1128c752011-10-14 00:13:49 +00003520
Alexander Duycka6b623e2009-10-27 23:47:53 +00003521 /* This call may decrease the number of queues */
Stefan Assmann53c7d062012-12-04 06:00:12 +00003522 if (igb_init_interrupt_scheme(adapter, true)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003523 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3524 return -ENOMEM;
3525 }
3526
3527 /* Explicitly disable IRQ since the NIC can be in any state. */
3528 igb_irq_disable(adapter);
3529
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003530 if (hw->mac.type >= e1000_i350)
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08003531 adapter->flags &= ~IGB_FLAG_DMAC;
3532
Auke Kok9d5c8242008-01-24 02:22:38 -08003533 set_bit(__IGB_DOWN, &adapter->state);
3534 return 0;
3535}
3536
3537/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003538 * igb_open - Called when a network interface is made active
3539 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003540 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003541 * Returns 0 on success, negative value on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003542 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003543 * The open entry point is called when a network interface is made
3544 * active by the system (IFF_UP). At this point all resources needed
3545 * for transmit and receive operations are allocated, the interrupt
3546 * handler is registered with the OS, the watchdog timer is started,
3547 * and the stack is notified that the interface is ready.
Auke Kok9d5c8242008-01-24 02:22:38 -08003548 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003549static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08003550{
3551 struct igb_adapter *adapter = netdev_priv(netdev);
3552 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003553 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003554 int err;
3555 int i;
3556
3557 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003558 if (test_bit(__IGB_TESTING, &adapter->state)) {
3559 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08003560 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003561 }
3562
3563 if (!resuming)
3564 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003565
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00003566 netif_carrier_off(netdev);
3567
Auke Kok9d5c8242008-01-24 02:22:38 -08003568 /* allocate transmit descriptors */
3569 err = igb_setup_all_tx_resources(adapter);
3570 if (err)
3571 goto err_setup_tx;
3572
3573 /* allocate receive descriptors */
3574 err = igb_setup_all_rx_resources(adapter);
3575 if (err)
3576 goto err_setup_rx;
3577
Nick Nunley88a268c2010-02-17 01:01:59 +00003578 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003579
Auke Kok9d5c8242008-01-24 02:22:38 -08003580 /* before we allocate an interrupt, we must be ready to handle it.
3581 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3582 * as soon as we call pci_request_irq, so we have to setup our
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003583 * clean_rx handler before we do so.
3584 */
Auke Kok9d5c8242008-01-24 02:22:38 -08003585 igb_configure(adapter);
3586
3587 err = igb_request_irq(adapter);
3588 if (err)
3589 goto err_req_irq;
3590
Alexander Duyck0c2cc022012-09-25 00:31:22 +00003591 /* Notify the stack of the actual queue counts. */
3592 err = netif_set_real_num_tx_queues(adapter->netdev,
3593 adapter->num_tx_queues);
3594 if (err)
3595 goto err_set_queues;
3596
3597 err = netif_set_real_num_rx_queues(adapter->netdev,
3598 adapter->num_rx_queues);
3599 if (err)
3600 goto err_set_queues;
3601
Auke Kok9d5c8242008-01-24 02:22:38 -08003602 /* From here on the code is the same as igb_up() */
3603 clear_bit(__IGB_DOWN, &adapter->state);
3604
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00003605 for (i = 0; i < adapter->num_q_vectors; i++)
3606 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08003607
3608 /* Clear any pending interrupts. */
3609 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003610
3611 igb_irq_enable(adapter);
3612
Alexander Duyckd4960302009-10-27 15:53:45 +00003613 /* notify VFs that reset has been completed */
3614 if (adapter->vfs_allocated_count) {
3615 u32 reg_data = rd32(E1000_CTRL_EXT);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00003616
Alexander Duyckd4960302009-10-27 15:53:45 +00003617 reg_data |= E1000_CTRL_EXT_PFRSTD;
3618 wr32(E1000_CTRL_EXT, reg_data);
3619 }
3620
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07003621 netif_tx_start_all_queues(netdev);
3622
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003623 if (!resuming)
3624 pm_runtime_put(&pdev->dev);
3625
Alexander Duyck25568a52009-10-27 23:49:59 +00003626 /* start the watchdog. */
3627 hw->mac.get_link_status = 1;
3628 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003629
3630 return 0;
3631
Alexander Duyck0c2cc022012-09-25 00:31:22 +00003632err_set_queues:
3633 igb_free_irq(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003634err_req_irq:
3635 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00003636 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003637 igb_free_all_rx_resources(adapter);
3638err_setup_rx:
3639 igb_free_all_tx_resources(adapter);
3640err_setup_tx:
3641 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003642 if (!resuming)
3643 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003644
3645 return err;
3646}
3647
Stefan Assmann46eafa52016-02-03 09:20:50 +01003648int igb_open(struct net_device *netdev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003649{
3650 return __igb_open(netdev, false);
3651}
3652
Auke Kok9d5c8242008-01-24 02:22:38 -08003653/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003654 * igb_close - Disables a network interface
3655 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003656 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003657 * Returns 0, this is not allowed to fail
Auke Kok9d5c8242008-01-24 02:22:38 -08003658 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003659 * The close entry point is called when an interface is de-activated
3660 * by the OS. The hardware is still under the driver's control, but
3661 * needs to be disabled. A global MAC reset is issued to stop the
3662 * hardware, and all transmit and receive resources are freed.
Auke Kok9d5c8242008-01-24 02:22:38 -08003663 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003664static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08003665{
3666 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003667 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003668
3669 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08003670
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003671 if (!suspending)
3672 pm_runtime_get_sync(&pdev->dev);
3673
3674 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003675 igb_free_irq(adapter);
3676
3677 igb_free_all_tx_resources(adapter);
3678 igb_free_all_rx_resources(adapter);
3679
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003680 if (!suspending)
3681 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003682 return 0;
3683}
3684
Stefan Assmann46eafa52016-02-03 09:20:50 +01003685int igb_close(struct net_device *netdev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003686{
Todd Fujinaka94749332016-11-15 08:54:26 -08003687 if (netif_device_present(netdev))
3688 return __igb_close(netdev, false);
3689 return 0;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003690}
3691
Auke Kok9d5c8242008-01-24 02:22:38 -08003692/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003693 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
3694 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9d5c8242008-01-24 02:22:38 -08003695 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003696 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003697 **/
Alexander Duyck80785292009-10-27 15:51:47 +00003698int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003699{
Alexander Duyck59d71982010-04-27 13:09:25 +00003700 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003701 int size;
3702
Alexander Duyck06034642011-08-26 07:44:22 +00003703 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003704
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08003705 tx_ring->tx_buffer_info = vmalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00003706 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003707 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08003708
3709 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08003710 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08003711 tx_ring->size = ALIGN(tx_ring->size, 4096);
3712
Alexander Duyck5536d212012-09-25 00:31:17 +00003713 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
3714 &tx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08003715 if (!tx_ring->desc)
3716 goto err;
3717
Auke Kok9d5c8242008-01-24 02:22:38 -08003718 tx_ring->next_to_use = 0;
3719 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00003720
Auke Kok9d5c8242008-01-24 02:22:38 -08003721 return 0;
3722
3723err:
Alexander Duyck06034642011-08-26 07:44:22 +00003724 vfree(tx_ring->tx_buffer_info);
Alexander Duyckf33005a2012-09-13 06:27:55 +00003725 tx_ring->tx_buffer_info = NULL;
3726 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003727 return -ENOMEM;
3728}
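/* Editor's sketch (illustrative only): the descriptor ring sizing math for
 * the allocation above, assuming the 16-byte advanced descriptor and the
 * default ring of 256 entries:
 *
 *	256 * sizeof(union e1000_adv_tx_desc) = 4096 -> ALIGN(, 4096) = 4096
 *	 80 * sizeof(union e1000_adv_tx_desc) = 1280 -> ALIGN(, 4096) = 4096
 *
 * so the coherent DMA buffer always spans whole 4K pages.
 */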
3729
3730/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003731 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
3732 * (Descriptors) for all queues
3733 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003734 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003735 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003736 **/
3737static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
3738{
Alexander Duyck439705e2009-10-27 23:49:20 +00003739 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003740 int i, err = 0;
3741
3742 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003743 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003744 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00003745 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08003746 "Allocation for Tx Queue %u failed\n", i);
3747 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00003748 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003749 break;
3750 }
3751 }
3752
3753 return err;
3754}
3755
3756/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003757 * igb_setup_tctl - configure the transmit control registers
3758 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003759 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003760void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003761{
Auke Kok9d5c8242008-01-24 02:22:38 -08003762 struct e1000_hw *hw = &adapter->hw;
3763 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08003764
Alexander Duyck85b430b2009-10-27 15:50:29 +00003765 /* disable queue 0 which is enabled by default on 82575 and 82576 */
3766 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003767
3768 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08003769 tctl = rd32(E1000_TCTL);
3770 tctl &= ~E1000_TCTL_CT;
3771 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
3772 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
3773
3774 igb_config_collision_dist(hw);
3775
Auke Kok9d5c8242008-01-24 02:22:38 -08003776 /* Enable transmits */
3777 tctl |= E1000_TCTL_EN;
3778
3779 wr32(E1000_TCTL, tctl);
3780}
3781
3782/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003783 * igb_configure_tx_ring - Configure transmit ring after Reset
3784 * @adapter: board private structure
3785 * @ring: tx ring to configure
Alexander Duyck85b430b2009-10-27 15:50:29 +00003786 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003787 * Configure a transmit ring after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00003788 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003789void igb_configure_tx_ring(struct igb_adapter *adapter,
Carolyn Wyborny9005df32014-04-11 01:45:34 +00003790 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003791{
3792 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00003793 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003794 u64 tdba = ring->dma;
3795 int reg_idx = ring->reg_idx;
3796
3797 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003798 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003799 wrfl();
3800 mdelay(10);
3801
3802 wr32(E1000_TDLEN(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003803 ring->count * sizeof(union e1000_adv_tx_desc));
Alexander Duyck85b430b2009-10-27 15:50:29 +00003804 wr32(E1000_TDBAL(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003805 tdba & 0x00000000ffffffffULL);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003806 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
3807
Cao jin629823b2016-11-08 15:06:20 +08003808 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003809 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003810 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003811
3812 txdctl |= IGB_TX_PTHRESH;
3813 txdctl |= IGB_TX_HTHRESH << 8;
3814 txdctl |= IGB_TX_WTHRESH << 16;
3815
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08003816 /* reinitialize tx_buffer_info */
3817 memset(ring->tx_buffer_info, 0,
3818 sizeof(struct igb_tx_buffer) * ring->count);
3819
Alexander Duyck85b430b2009-10-27 15:50:29 +00003820 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3821 wr32(E1000_TXDCTL(reg_idx), txdctl);
3822}
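/* Editor's sketch: TXDCTL packs three byte-aligned threshold fields plus the
 * queue-enable bit. With hypothetical thresholds PTHRESH = 8, HTHRESH = 1,
 * WTHRESH = 1 (the real defaults vary by MAC type and interrupt mode):
 *
 *	txdctl = 8 | (1 << 8) | (1 << 16);	// 0x00010108
 *	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
 */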
3823
3824/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003825 * igb_configure_tx - Configure transmit Unit after Reset
3826 * @adapter: board private structure
Alexander Duyck85b430b2009-10-27 15:50:29 +00003827 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003828 * Configure the Tx unit of the MAC after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00003829 **/
3830static void igb_configure_tx(struct igb_adapter *adapter)
3831{
3832 int i;
3833
3834 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003835 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003836}
3837
3838/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003839 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
3840 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
Auke Kok9d5c8242008-01-24 02:22:38 -08003841 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003842 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003843 **/
Alexander Duyck80785292009-10-27 15:51:47 +00003844int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003845{
Alexander Duyck59d71982010-04-27 13:09:25 +00003846 struct device *dev = rx_ring->dev;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003847 int size;
Auke Kok9d5c8242008-01-24 02:22:38 -08003848
Alexander Duyck06034642011-08-26 07:44:22 +00003849 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003850
Alexander Duyckd2bead52017-02-06 18:25:50 -08003851 rx_ring->rx_buffer_info = vmalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00003852 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003853 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08003854
Auke Kok9d5c8242008-01-24 02:22:38 -08003855 /* Round up to nearest 4K */
Alexander Duyckf33005a2012-09-13 06:27:55 +00003856 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08003857 rx_ring->size = ALIGN(rx_ring->size, 4096);
3858
Alexander Duyck5536d212012-09-25 00:31:17 +00003859 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3860 &rx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08003861 if (!rx_ring->desc)
3862 goto err;
3863
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003864 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003865 rx_ring->next_to_clean = 0;
3866 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003867
Auke Kok9d5c8242008-01-24 02:22:38 -08003868 return 0;
3869
3870err:
Alexander Duyck06034642011-08-26 07:44:22 +00003871 vfree(rx_ring->rx_buffer_info);
3872 rx_ring->rx_buffer_info = NULL;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003873 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003874 return -ENOMEM;
3875}
3876
3877/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003878 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3879 * (Descriptors) for all queues
3880 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003881 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003882 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003883 **/
3884static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3885{
Alexander Duyck439705e2009-10-27 23:49:20 +00003886 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003887 int i, err = 0;
3888
3889 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003890 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003891 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00003892 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08003893 "Allocation for Rx Queue %u failed\n", i);
3894 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00003895 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003896 break;
3897 }
3898 }
3899
3900 return err;
3901}
3902
3903/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003904 * igb_setup_mrqc - configure the multiple receive queue control registers
3905 * @adapter: Board private structure
Alexander Duyck06cf2662009-10-27 15:53:25 +00003906 **/
3907static void igb_setup_mrqc(struct igb_adapter *adapter)
3908{
3909 struct e1000_hw *hw = &adapter->hw;
3910 u32 mrqc, rxcsum;
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003911 u32 j, num_rx_queues;
Eric Dumazeteb31f842014-11-16 06:23:14 -08003912 u32 rss_key[10];
Alexander Duyck06cf2662009-10-27 15:53:25 +00003913
Eric Dumazeteb31f842014-11-16 06:23:14 -08003914 netdev_rss_key_fill(rss_key, sizeof(rss_key));
Alexander Duycka57fe232012-09-13 06:28:16 +00003915 for (j = 0; j < 10; j++)
Eric Dumazeteb31f842014-11-16 06:23:14 -08003916 wr32(E1000_RSSRK(j), rss_key[j]);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003917
Alexander Duycka99955f2009-11-12 18:37:19 +00003918 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003919
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003920 switch (hw->mac.type) {
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003921 case e1000_82576:
3922 /* 82576 supports 2 RSS queues for SR-IOV */
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003923 if (adapter->vfs_allocated_count)
Alexander Duyck06cf2662009-10-27 15:53:25 +00003924 num_rx_queues = 2;
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003925 break;
3926 default:
3927 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003928 }
3929
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003930 if (adapter->rss_indir_tbl_init != num_rx_queues) {
3931 for (j = 0; j < IGB_RETA_SIZE; j++)
Carolyn Wybornyc502ea22014-04-11 01:46:33 +00003932 adapter->rss_indir_tbl[j] =
3933 (j * num_rx_queues) / IGB_RETA_SIZE;
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003934 adapter->rss_indir_tbl_init = num_rx_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003935 }
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003936 igb_write_rss_indir_tbl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003937
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003938 /* Disable raw packet checksumming so that RSS hash is placed in
Alexander Duyck06cf2662009-10-27 15:53:25 +00003939 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3940 * offloads as they are enabled by default
3941 */
3942 rxcsum = rd32(E1000_RXCSUM);
3943 rxcsum |= E1000_RXCSUM_PCSD;
3944
3945 if (adapter->hw.mac.type >= e1000_82576)
3946 /* Enable Receive Checksum Offload for SCTP */
3947 rxcsum |= E1000_RXCSUM_CRCOFL;
3948
3949 /* Don't need to set TUOFL or IPOFL, they default to 1 */
3950 wr32(E1000_RXCSUM, rxcsum);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003951
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003952 /* Generate RSS hash based on packet types, TCP/UDP
3953 * port numbers and/or IPv4/v6 src and dst addresses
3954 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003955 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3956 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3957 E1000_MRQC_RSS_FIELD_IPV6 |
3958 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3959 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003960
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003961 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3962 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3963 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3964 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3965
Alexander Duyck06cf2662009-10-27 15:53:25 +00003966 /* If VMDq is enabled then we set the appropriate mode for that, else
3967 * we default to RSS so that an RSS hash is calculated per packet even
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003968 * if we are only using one queue
3969 */
Alexander Duyck06cf2662009-10-27 15:53:25 +00003970 if (adapter->vfs_allocated_count) {
3971 if (hw->mac.type > e1000_82575) {
3972 /* Set the default pool for the PF's first queue */
3973 u32 vtctl = rd32(E1000_VT_CTL);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00003974
Alexander Duyck06cf2662009-10-27 15:53:25 +00003975 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3976 E1000_VT_CTL_DISABLE_DEF_POOL);
3977 vtctl |= adapter->vfs_allocated_count <<
3978 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3979 wr32(E1000_VT_CTL, vtctl);
3980 }
Alexander Duycka99955f2009-11-12 18:37:19 +00003981 if (adapter->rss_queues > 1)
Todd Fujinakac883de92016-01-11 09:34:50 -08003982 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003983 else
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003984 mrqc |= E1000_MRQC_ENABLE_VMDQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003985 } else {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003986 if (hw->mac.type != e1000_i211)
Todd Fujinakac883de92016-01-11 09:34:50 -08003987 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003988 }
3989 igb_vmm_control(adapter);
3990
Alexander Duyck06cf2662009-10-27 15:53:25 +00003991 wr32(E1000_MRQC, mrqc);
3992}
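/* Editor's sketch: the default indirection table above spreads the 128
 * IGB_RETA_SIZE entries evenly over the active queues with integer
 * division. For num_rx_queues = 4:
 *
 *	rss_indir_tbl[j] = (j * 4) / 128 = j / 32
 *
 * so entries 0-31 map to queue 0, 32-63 to queue 1, 64-95 to queue 2 and
 * 96-127 to queue 3: each queue owns a contiguous quarter of the table.
 */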
3993
3994/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003995 * igb_setup_rctl - configure the receive control registers
3996 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003997 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003998void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003999{
4000 struct e1000_hw *hw = &adapter->hw;
4001 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08004002
4003 rctl = rd32(E1000_RCTL);
4004
4005 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08004006 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004007
Alexander Duyck69d728b2008-11-25 01:04:03 -08004008 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00004009 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08004010
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004011 /* enable stripping of CRC. It's unlikely this will break BMC
Auke Kok87cb7e82008-07-08 15:08:29 -07004012 * redirection as it did with e1000. Newer features require
4013 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004014 */
Auke Kok87cb7e82008-07-08 15:08:29 -07004015 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004016
Alexander Duyck559e9c42009-10-27 23:52:50 +00004017 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08004018 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08004019
Alexander Duyck45693bc2016-01-06 23:10:39 -08004020 /* enable LPE to allow for reception of jumbo frames */
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00004021 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08004022
Alexander Duyck952f72a2009-10-27 15:51:07 +00004023 /* disable queue 0 to prevent tail write w/o re-config */
4024 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08004025
Alexander Duycke1739522009-02-19 20:39:44 -08004026 /* Attention!!! For SR-IOV PF driver operations you must enable
4027 * queue drop for all VF and PF queues to prevent head of line blocking
4028 * if an un-trusted VF does not provide descriptors to hardware.
4029 */
4030 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08004031 /* set all queue drop enable bits */
4032 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08004033 }
4034
Ben Greear89eaefb2012-03-06 09:41:58 +00004035 /* This is useful for sniffing bad packets. */
4036 if (adapter->netdev->features & NETIF_F_RXALL) {
4037 /* UPE and MPE will be handled by normal PROMISC logic
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004038		 * in igb_set_rx_mode
4039 */
Ben Greear89eaefb2012-03-06 09:41:58 +00004040 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
4041 E1000_RCTL_BAM | /* RX All Bcast Pkts */
4042 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
4043
Alexander Duyck16903ca2016-01-06 23:11:18 -08004044 rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
Ben Greear89eaefb2012-03-06 09:41:58 +00004045 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
4046 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
4047 * and that breaks VLANs.
4048 */
4049 }
4050
Auke Kok9d5c8242008-01-24 02:22:38 -08004051 wr32(E1000_RCTL, rctl);
4052}
4053
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004054static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
Carolyn Wyborny9005df32014-04-11 01:45:34 +00004055 int vfn)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004056{
4057 struct e1000_hw *hw = &adapter->hw;
4058 u32 vmolr;
4059
Alexander Duyckd3836f82016-01-06 23:10:47 -08004060 if (size > MAX_JUMBO_FRAME_SIZE)
4061 size = MAX_JUMBO_FRAME_SIZE;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004062
4063 vmolr = rd32(E1000_VMOLR(vfn));
4064 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4065 vmolr |= size | E1000_VMOLR_LPE;
4066 wr32(E1000_VMOLR(vfn), vmolr);
4067
4068 return 0;
4069}
4070
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004071static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4072 int vfn, bool enable)
Alexander Duycke1739522009-02-19 20:39:44 -08004073{
Alexander Duycke1739522009-02-19 20:39:44 -08004074 struct e1000_hw *hw = &adapter->hw;
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004075 u32 val, reg;
Alexander Duycke1739522009-02-19 20:39:44 -08004076
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004077 if (hw->mac.type < e1000_82576)
4078 return;
Alexander Duycke1739522009-02-19 20:39:44 -08004079
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004080 if (hw->mac.type == e1000_i350)
4081 reg = E1000_DVMOLR(vfn);
4082 else
4083 reg = E1000_VMOLR(vfn);
4084
4085 val = rd32(reg);
4086 if (enable)
4087 val |= E1000_VMOLR_STRVLAN;
4088 else
4089 val &= ~(E1000_VMOLR_STRVLAN);
4090 wr32(reg, val);
Alexander Duycke1739522009-02-19 20:39:44 -08004091}
4092
Williams, Mitch A8151d292010-02-10 01:44:24 +00004093static inline void igb_set_vmolr(struct igb_adapter *adapter,
4094 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004095{
4096 struct e1000_hw *hw = &adapter->hw;
4097 u32 vmolr;
4098
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004099 /* This register exists only on 82576 and newer so if we are older then
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004100 * we should exit and do nothing
4101 */
4102 if (hw->mac.type < e1000_82576)
4103 return;
4104
4105 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00004106 if (aupe)
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004107 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004108 else
4109 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004110
4111 /* clear all bits that might not be set */
4112 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4113
Alexander Duycka99955f2009-11-12 18:37:19 +00004114 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004115 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004116 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004117 * multicast packets
4118 */
4119 if (vfn <= adapter->vfs_allocated_count)
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004120 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004121
4122 wr32(E1000_VMOLR(vfn), vmolr);
4123}
4124
Alexander Duycke1739522009-02-19 20:39:44 -08004125/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004126 * igb_configure_rx_ring - Configure a receive ring after Reset
4127 * @adapter: board private structure
4128 * @ring: receive ring to be configured
Alexander Duyck85b430b2009-10-27 15:50:29 +00004129 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004130 * Configure the Rx unit of the MAC after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00004131 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00004132void igb_configure_rx_ring(struct igb_adapter *adapter,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004133 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00004134{
4135 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck7ec01162017-02-06 18:25:41 -08004136 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck85b430b2009-10-27 15:50:29 +00004137 u64 rdba = ring->dma;
4138 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00004139 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00004140
4141 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00004142 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00004143
4144 /* Set DMA base address registers */
4145 wr32(E1000_RDBAL(reg_idx),
4146 rdba & 0x00000000ffffffffULL);
4147 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4148 wr32(E1000_RDLEN(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004149 ring->count * sizeof(union e1000_adv_rx_desc));
Alexander Duyck85b430b2009-10-27 15:50:29 +00004150
4151 /* initialize head and tail */
Cao jin629823b2016-11-08 15:06:20 +08004152 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00004153 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00004154 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00004155
Alexander Duyck952f72a2009-10-27 15:51:07 +00004156 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00004157 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck8649aae2017-02-06 18:27:03 -08004158 if (ring_uses_large_buffer(ring))
4159 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4160 else
4161 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00004162 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
Alexander Duyck06218a82011-08-26 07:46:55 +00004163 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00004164 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00004165 /* Only set Drop Enable if we are supporting multiple queues */
4166 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4167 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00004168
4169 wr32(E1000_SRRCTL(reg_idx), srrctl);
4170
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004171 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004172 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004173
Alexander Duyck85b430b2009-10-27 15:50:29 +00004174 rxdctl |= IGB_RX_PTHRESH;
4175 rxdctl |= IGB_RX_HTHRESH << 8;
4176 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00004177
Alexander Duyckd2bead52017-02-06 18:25:50 -08004178 /* initialize rx_buffer_info */
4179 memset(ring->rx_buffer_info, 0,
4180 sizeof(struct igb_rx_buffer) * ring->count);
4181
Alexander Duyck7ec01162017-02-06 18:25:41 -08004182 /* initialize Rx descriptor 0 */
4183 rx_desc = IGB_RX_DESC(ring, 0);
4184 rx_desc->wb.upper.length = 0;
4185
Alexander Duycka74420e2011-08-26 07:43:27 +00004186 /* enable receive descriptor fetching */
4187 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00004188 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4189}
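/* Editor's sketch: SRRCTL encodes the packet buffer size in 1 KB units
 * (E1000_SRRCTL_BSIZEPKT_SHIFT is 10), so the writes above amount to:
 *
 *	IGB_RXBUFFER_2048 >> 10 = 2	// 2 KB buffers
 *	IGB_RXBUFFER_3072 >> 10 = 3	// 3 KB "large" buffers
 *
 * while the header size field is encoded in 64-byte units via
 * E1000_SRRCTL_BSIZEHDRSIZE_SHIFT.
 */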
4190
Alexander Duyck8649aae2017-02-06 18:27:03 -08004191static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4192 struct igb_ring *rx_ring)
4193{
4194 /* set build_skb and buffer size flags */
Alexander Duycke3cdf682017-02-06 18:27:14 -08004195 clear_ring_build_skb_enabled(rx_ring);
Alexander Duyck8649aae2017-02-06 18:27:03 -08004196 clear_ring_uses_large_buffer(rx_ring);
4197
4198 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4199 return;
4200
Alexander Duycke3cdf682017-02-06 18:27:14 -08004201 set_ring_build_skb_enabled(rx_ring);
4202
Alexander Duyck8649aae2017-02-06 18:27:03 -08004203#if (PAGE_SIZE < 8192)
4204 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4205 return;
4206
4207 set_ring_uses_large_buffer(rx_ring);
4208#endif
4209}
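/* Editor's note (assumed rationale): on PAGE_SIZE < 8192 systems each half
 * page holds a 2048-byte buffer plus the skb_shared_info that build_skb()
 * appends; once max_frame_size exceeds IGB_MAX_FRAME_BUILD_SKB that no
 * longer fits, so the ring flips to order-1 pages with 3072-byte "large"
 * buffers instead. Larger page sizes always leave enough headroom, hence
 * the #if guard above.
 */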
4210
Alexander Duyck85b430b2009-10-27 15:50:29 +00004211/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004212 * igb_configure_rx - Configure receive Unit after Reset
4213 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004214 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004215 * Configure the Rx unit of the MAC after a reset.
Auke Kok9d5c8242008-01-24 02:22:38 -08004216 **/
4217static void igb_configure_rx(struct igb_adapter *adapter)
4218{
Hannes Eder91075842009-02-18 19:36:04 -08004219 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004220
Alexander Duyck26ad9172009-10-05 06:32:49 +00004221 /* set the correct pool for the PF default MAC address in entry 0 */
Yury Kylulin83c21332017-03-07 11:20:25 +03004222 igb_set_default_mac_filter(adapter);
Alexander Duyck26ad9172009-10-05 06:32:49 +00004223
Alexander Duyck06cf2662009-10-27 15:53:25 +00004224 /* Setup the HW Rx Head and Tail Descriptor Pointers and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004225 * the Base and Length of the Rx Descriptor Ring
4226 */
Alexander Duyck8649aae2017-02-06 18:27:03 -08004227 for (i = 0; i < adapter->num_rx_queues; i++) {
4228 struct igb_ring *rx_ring = adapter->rx_ring[i];
4229
4230 igb_set_rx_buffer_len(adapter, rx_ring);
4231 igb_configure_rx_ring(adapter, rx_ring);
4232 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004233}
4234
4235/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004236 * igb_free_tx_resources - Free Tx Resources per Queue
4237 * @tx_ring: Tx descriptor ring for a specific queue
Auke Kok9d5c8242008-01-24 02:22:38 -08004238 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004239 * Free all transmit software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004240 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08004241void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004242{
Mitch Williams3b644cf2008-06-27 10:59:48 -07004243 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08004244
Alexander Duyck06034642011-08-26 07:44:22 +00004245 vfree(tx_ring->tx_buffer_info);
4246 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08004247
Alexander Duyck439705e2009-10-27 23:49:20 +00004248 /* if not set, then don't free */
4249 if (!tx_ring->desc)
4250 return;
4251
Alexander Duyck59d71982010-04-27 13:09:25 +00004252 dma_free_coherent(tx_ring->dev, tx_ring->size,
4253 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08004254
4255 tx_ring->desc = NULL;
4256}
4257
4258/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004259 * igb_free_all_tx_resources - Free Tx Resources for All Queues
4260 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004261 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004262 * Free all transmit software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004263 **/
4264static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4265{
4266 int i;
4267
4268 for (i = 0; i < adapter->num_tx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004269 if (adapter->tx_ring[i])
4270 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004271}
4272
Auke Kok9d5c8242008-01-24 02:22:38 -08004273/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004274 * igb_clean_tx_ring - Free Tx Buffers
4275 * @tx_ring: ring to be cleaned
Auke Kok9d5c8242008-01-24 02:22:38 -08004276 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07004277static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004278{
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004279 u16 i = tx_ring->next_to_clean;
4280 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08004281
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004282 while (i != tx_ring->next_to_use) {
4283 union e1000_adv_tx_desc *eop_desc, *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004284
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004285 /* Free all the Tx ring sk_buffs */
4286 dev_kfree_skb_any(tx_buffer->skb);
4287
4288 /* unmap skb header data */
4289 dma_unmap_single(tx_ring->dev,
4290 dma_unmap_addr(tx_buffer, dma),
4291 dma_unmap_len(tx_buffer, len),
4292 DMA_TO_DEVICE);
4293
4294 /* check for eop_desc to determine the end of the packet */
4295 eop_desc = tx_buffer->next_to_watch;
4296 tx_desc = IGB_TX_DESC(tx_ring, i);
4297
4298 /* unmap remaining buffers */
4299 while (tx_desc != eop_desc) {
4300 tx_buffer++;
4301 tx_desc++;
4302 i++;
4303 if (unlikely(i == tx_ring->count)) {
4304 i = 0;
4305 tx_buffer = tx_ring->tx_buffer_info;
4306 tx_desc = IGB_TX_DESC(tx_ring, 0);
4307 }
4308
4309 /* unmap any remaining paged data */
4310 if (dma_unmap_len(tx_buffer, len))
4311 dma_unmap_page(tx_ring->dev,
4312 dma_unmap_addr(tx_buffer, dma),
4313 dma_unmap_len(tx_buffer, len),
4314 DMA_TO_DEVICE);
4315 }
4316
4317 /* move us one more past the eop_desc for start of next pkt */
4318 tx_buffer++;
4319 i++;
4320 if (unlikely(i == tx_ring->count)) {
4321 i = 0;
4322 tx_buffer = tx_ring->tx_buffer_info;
4323 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004324 }
4325
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004326 /* reset BQL for queue */
John Fastabenddad8a3b2012-04-23 12:22:39 +00004327 netdev_tx_reset_queue(txring_txq(tx_ring));
4328
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004329 /* reset next_to_use and next_to_clean */
Auke Kok9d5c8242008-01-24 02:22:38 -08004330 tx_ring->next_to_use = 0;
4331 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004332}
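/* Editor's sketch: the cleanup walk above touches only the descriptors the
 * hardware still owns, wrapping at the ring boundary. With a hypothetical
 * count = 8, next_to_clean = 6 and next_to_use = 2, the loop frees indices
 * 6, 7, 0, 1 and then stops; index 2 onward was never posted to hardware.
 */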
4333
4334/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004335 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
4336 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004337 **/
4338static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4339{
4340 int i;
4341
4342 for (i = 0; i < adapter->num_tx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004343 if (adapter->tx_ring[i])
4344 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004345}
4346
4347/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004348 * igb_free_rx_resources - Free Rx Resources
4349 * @rx_ring: ring to clean the resources from
Auke Kok9d5c8242008-01-24 02:22:38 -08004350 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004351 * Free all receive software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004352 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08004353void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004354{
Mitch Williams3b644cf2008-06-27 10:59:48 -07004355 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08004356
Alexander Duyck06034642011-08-26 07:44:22 +00004357 vfree(rx_ring->rx_buffer_info);
4358 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08004359
Alexander Duyck439705e2009-10-27 23:49:20 +00004360 /* if not set, then don't free */
4361 if (!rx_ring->desc)
4362 return;
4363
Alexander Duyck59d71982010-04-27 13:09:25 +00004364 dma_free_coherent(rx_ring->dev, rx_ring->size,
4365 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08004366
4367 rx_ring->desc = NULL;
4368}
4369
4370/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004371 * igb_free_all_rx_resources - Free Rx Resources for All Queues
4372 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004373 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004374 * Free all receive software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004375 **/
4376static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4377{
4378 int i;
4379
4380 for (i = 0; i < adapter->num_rx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004381 if (adapter->rx_ring[i])
4382 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004383}
4384
4385/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004386 * igb_clean_rx_ring - Free Rx Buffers per Queue
4387 * @rx_ring: ring to free buffers from
Auke Kok9d5c8242008-01-24 02:22:38 -08004388 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07004389static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004390{
Alexander Duyckd2bead52017-02-06 18:25:50 -08004391 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08004392
Alexander Duyck1a1c2252012-09-25 00:30:52 +00004393 if (rx_ring->skb)
4394 dev_kfree_skb(rx_ring->skb);
4395 rx_ring->skb = NULL;
4396
Auke Kok9d5c8242008-01-24 02:22:38 -08004397 /* Free all the Rx ring sk_buffs */
Alexander Duyckd2bead52017-02-06 18:25:50 -08004398 while (i != rx_ring->next_to_alloc) {
Alexander Duyck06034642011-08-26 07:44:22 +00004399 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08004400
Alexander Duyck5be59552016-12-14 15:05:30 -08004401 /* Invalidate cache lines that may have been written to by
4402 * device so that we avoid corrupting memory.
4403 */
4404 dma_sync_single_range_for_cpu(rx_ring->dev,
4405 buffer_info->dma,
4406 buffer_info->page_offset,
Alexander Duyck8649aae2017-02-06 18:27:03 -08004407 igb_rx_bufsz(rx_ring),
Alexander Duyck5be59552016-12-14 15:05:30 -08004408 DMA_FROM_DEVICE);
4409
4410 /* free resources associated with mapping */
4411 dma_unmap_page_attrs(rx_ring->dev,
4412 buffer_info->dma,
Alexander Duyck8649aae2017-02-06 18:27:03 -08004413 igb_rx_pg_size(rx_ring),
Alexander Duyck5be59552016-12-14 15:05:30 -08004414 DMA_FROM_DEVICE,
Alexander Duyck7bd17592017-02-06 18:25:26 -08004415 IGB_RX_DMA_ATTR);
Alexander Duyck2976db82017-01-10 16:58:09 -08004416 __page_frag_cache_drain(buffer_info->page,
4417 buffer_info->pagecnt_bias);
Alexander Duyckcbc8e552012-09-25 00:31:02 +00004418
Alexander Duyckd2bead52017-02-06 18:25:50 -08004419 i++;
4420 if (i == rx_ring->count)
4421 i = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004422 }
4423
Alexander Duyckcbc8e552012-09-25 00:31:02 +00004424 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004425 rx_ring->next_to_clean = 0;
4426 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004427}
4428
4429/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004430 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
4431 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004432 **/
4433static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4434{
4435 int i;
4436
4437 for (i = 0; i < adapter->num_rx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004438 if (adapter->rx_ring[i])
4439 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004440}
4441
4442/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004443 * igb_set_mac - Change the Ethernet Address of the NIC
4444 * @netdev: network interface device structure
4445 * @p: pointer to an address structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004446 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004447 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08004448 **/
4449static int igb_set_mac(struct net_device *netdev, void *p)
4450{
4451 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00004452 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08004453 struct sockaddr *addr = p;
4454
4455 if (!is_valid_ether_addr(addr->sa_data))
4456 return -EADDRNOTAVAIL;
4457
4458 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00004459 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08004460
Alexander Duyck26ad9172009-10-05 06:32:49 +00004461 /* set the correct pool for the new PF MAC address in entry 0 */
Yury Kylulin83c21332017-03-07 11:20:25 +03004462 igb_set_default_mac_filter(adapter);
Alexander Duycke1739522009-02-19 20:39:44 -08004463
Auke Kok9d5c8242008-01-24 02:22:38 -08004464 return 0;
4465}
4466
4467/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004468 * igb_write_mc_addr_list - write multicast addresses to MTA
4469 * @netdev: network interface device structure
Alexander Duyck68d480c2009-10-05 06:33:08 +00004470 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004471 * Writes multicast address list to the MTA hash table.
4472 * Returns: -ENOMEM on failure
4473 * 0 on no addresses written
4474 * X on writing X addresses to MTA
Alexander Duyck68d480c2009-10-05 06:33:08 +00004475 **/
4476static int igb_write_mc_addr_list(struct net_device *netdev)
4477{
4478 struct igb_adapter *adapter = netdev_priv(netdev);
4479 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004480 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004481 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004482 int i;
4483
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004484 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00004485 /* nothing to program, so clear mc list */
4486 igb_update_mc_addr_list(hw, NULL, 0);
4487 igb_restore_vf_multicasts(adapter);
4488 return 0;
4489 }
4490
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004491	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00004492 if (!mta_list)
4493 return -ENOMEM;
4494
Alexander Duyck68d480c2009-10-05 06:33:08 +00004495 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00004496 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004497 netdev_for_each_mc_addr(ha, netdev)
4498 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00004499
Alexander Duyck68d480c2009-10-05 06:33:08 +00004500 igb_update_mc_addr_list(hw, mta_list, i);
4501 kfree(mta_list);
4502
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004503 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00004504}
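/* Editor's sketch: the "packed array" handed to igb_update_mc_addr_list()
 * is simply the 6-byte addresses laid end to end with no padding:
 *
 *	mta_list[0..5]  = first multicast address
 *	mta_list[6..11] = second multicast address, ...
 *
 * which is why the copy loop above indexes with (i++ * ETH_ALEN).
 */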
4505
Alexander Duyck16903ca2016-01-06 23:11:18 -08004506static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4507{
4508 struct e1000_hw *hw = &adapter->hw;
4509 u32 i, pf_id;
4510
4511 switch (hw->mac.type) {
4512 case e1000_i210:
4513 case e1000_i211:
4514 case e1000_i350:
4515 /* VLAN filtering needed for VLAN prio filter */
4516 if (adapter->netdev->features & NETIF_F_NTUPLE)
4517 break;
4518 /* fall through */
4519 case e1000_82576:
4520 case e1000_82580:
4521 case e1000_i354:
4522 /* VLAN filtering needed for pool filtering */
4523 if (adapter->vfs_allocated_count)
4524 break;
4525 /* fall through */
4526 default:
4527 return 1;
4528 }
4529
4530 /* We are already in VLAN promisc, nothing to do */
4531 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4532 return 0;
4533
4534 if (!adapter->vfs_allocated_count)
4535 goto set_vfta;
4536
4537 /* Add PF to all active pools */
4538 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4539
4540 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4541 u32 vlvf = rd32(E1000_VLVF(i));
4542
Jacob Kellera51d8c22016-04-13 16:08:28 -07004543 vlvf |= BIT(pf_id);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004544 wr32(E1000_VLVF(i), vlvf);
4545 }
4546
4547set_vfta:
4548 /* Set all bits in the VLAN filter table array */
4549 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4550 hw->mac.ops.write_vfta(hw, i, ~0U);
4551
4552 /* Set flag so we don't redo unnecessary work */
4553 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4554
4555 return 0;
4556}
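/* Editor's sketch: a VLVF entry holds the VLAN ID in its low bits and a
 * pool-select bitmap starting at E1000_VLVF_POOLSEL_SHIFT (12). Pools
 * 0..n-1 belong to the VFs and pool n to the PF, so with a hypothetical
 * 7 VFs the loop above sets:
 *
 *	pf_id = 7 + 12;		// PF pool bit position
 *	vlvf |= BIT(pf_id);	// bit 19: add the PF to every active pool
 */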
4557
4558#define VFTA_BLOCK_SIZE 8
4559static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4560{
4561 struct e1000_hw *hw = &adapter->hw;
4562 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4563 u32 vid_start = vfta_offset * 32;
4564 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4565 u32 i, vid, word, bits, pf_id;
4566
4567 /* guarantee that we don't scrub out management VLAN */
4568 vid = adapter->mng_vlan_id;
4569 if (vid >= vid_start && vid < vid_end)
Jacob Kellera51d8c22016-04-13 16:08:28 -07004570 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004571
4572 if (!adapter->vfs_allocated_count)
4573 goto set_vfta;
4574
4575 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4576
4577 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4578 u32 vlvf = rd32(E1000_VLVF(i));
4579
4580 /* pull VLAN ID from VLVF */
4581 vid = vlvf & VLAN_VID_MASK;
4582
4583 /* only concern ourselves with a certain range */
4584 if (vid < vid_start || vid >= vid_end)
4585 continue;
4586
4587 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4588 /* record VLAN ID in VFTA */
Jacob Kellera51d8c22016-04-13 16:08:28 -07004589 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004590
4591 /* if PF is part of this then continue */
4592 if (test_bit(vid, adapter->active_vlans))
4593 continue;
4594 }
4595
4596 /* remove PF from the pool */
Jacob Kellera51d8c22016-04-13 16:08:28 -07004597 bits = ~BIT(pf_id);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004598 bits &= rd32(E1000_VLVF(i));
4599 wr32(E1000_VLVF(i), bits);
4600 }
4601
4602set_vfta:
4603 /* extract values from active_vlans and write back to VFTA */
4604 for (i = VFTA_BLOCK_SIZE; i--;) {
4605 vid = (vfta_offset + i) * 32;
4606 word = vid / BITS_PER_LONG;
4607 bits = vid % BITS_PER_LONG;
4608
4609 vfta[i] |= adapter->active_vlans[word] >> bits;
4610
4611 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4612 }
4613}
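/* Editor's sketch: the VFTA is a 128 x 32-bit bitmap, one bit per VLAN ID;
 * igb_scrub_vfta() rebuilds it VFTA_BLOCK_SIZE (8) registers at a time from
 * the software active_vlans bitmap. The vid -> (register, bit) mapping:
 */
#if 0	/* illustrative only, not built */
static void igb_example_vfta_pos(u32 vid, u32 *reg, u32 *bit)
{
	*reg = vid / 32;	/* e.g. vid 1029 -> VFTA[32] */
	*bit = vid % 32;	/* e.g. vid 1029 -> bit 5 */
}
#endif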
4614
4615static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4616{
4617 u32 i;
4618
4619 /* We are not in VLAN promisc, nothing to do */
4620 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4621 return;
4622
4623 /* Set flag so we don't redo unnecessary work */
4624 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4625
4626 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4627 igb_scrub_vfta(adapter, i);
4628}
4629
Alexander Duyck68d480c2009-10-05 06:33:08 +00004630/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004631 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4632 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004633 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004634 * The set_rx_mode entry point is called whenever the unicast or multicast
4635 * address lists or the network interface flags are updated. This routine is
4636 * responsible for configuring the hardware for proper unicast, multicast,
4637 * promiscuous mode, and all-multi behavior.
Auke Kok9d5c8242008-01-24 02:22:38 -08004638 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004639static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004640{
4641 struct igb_adapter *adapter = netdev_priv(netdev);
4642 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004643 unsigned int vfn = adapter->vfs_allocated_count;
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004644 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004645 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08004646
4647 /* Check for Promiscuous and All Multicast modes */
Patrick McHardy746b9f02008-07-16 20:15:45 -07004648 if (netdev->flags & IFF_PROMISC) {
Alexander Duyck16903ca2016-01-06 23:11:18 -08004649 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
Alexander Duyckbf456ab2016-01-06 23:11:43 -08004650 vmolr |= E1000_VMOLR_MPME;
4651
4652 /* enable use of UTA filter to force packets to default pool */
4653 if (hw->mac.type == e1000_82576)
4654 vmolr |= E1000_VMOLR_ROPE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07004655 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00004656 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07004657 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004658 vmolr |= E1000_VMOLR_MPME;
4659 } else {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004660			/* Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004661 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00004662 * that we can at least receive multicast traffic
4663 */
4664 count = igb_write_mc_addr_list(netdev);
4665 if (count < 0) {
4666 rctl |= E1000_RCTL_MPE;
4667 vmolr |= E1000_VMOLR_MPME;
4668 } else if (count) {
4669 vmolr |= E1000_VMOLR_ROMPE;
4670 }
4671 }
Patrick McHardy746b9f02008-07-16 20:15:45 -07004672 }
Alexander Duyck268f9d32016-01-06 23:11:34 -08004673
4674 /* Write addresses to available RAR registers, if there is not
4675 * sufficient space to store all the addresses then enable
4676 * unicast promiscuous mode
4677 */
Yury Kylulin83c21332017-03-07 11:20:25 +03004678 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
Alexander Duyck268f9d32016-01-06 23:11:34 -08004679 rctl |= E1000_RCTL_UPE;
4680 vmolr |= E1000_VMOLR_ROPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08004681 }
Alexander Duyck16903ca2016-01-06 23:11:18 -08004682
4683 /* enable VLAN filtering by default */
4684 rctl |= E1000_RCTL_VFE;
4685
4686 /* disable VLAN filtering for modes that require it */
4687 if ((netdev->flags & IFF_PROMISC) ||
4688 (netdev->features & NETIF_F_RXALL)) {
4689 /* if we fail to set all rules then just clear VFE */
4690 if (igb_vlan_promisc_enable(adapter))
4691 rctl &= ~E1000_RCTL_VFE;
4692 } else {
4693 igb_vlan_promisc_disable(adapter);
4694 }
4695
4696 /* update state of unicast, multicast, and VLAN filtering modes */
4697 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
4698 E1000_RCTL_VFE);
Auke Kok9d5c8242008-01-24 02:22:38 -08004699 wr32(E1000_RCTL, rctl);
4700
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004701#if (PAGE_SIZE < 8192)
4702 if (!adapter->vfs_allocated_count) {
4703 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4704 rlpml = IGB_MAX_FRAME_BUILD_SKB;
4705 }
4706#endif
4707 wr32(E1000_RLPML, rlpml);
4708
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004709 /* In order to support SR-IOV and eventually VMDq it is necessary to set
Alexander Duyck68d480c2009-10-05 06:33:08 +00004710 * the VMOLR to enable the appropriate modes. Without this workaround
4711 * we will have issues with VLAN tag stripping not being done for frames
4712 * that are only arriving because we are the default pool
4713 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00004714 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
Alexander Duyck28fc06f2009-07-23 18:08:54 +00004715 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00004716
Alexander Duyckbf456ab2016-01-06 23:11:43 -08004717 /* set UTA to appropriate mode */
4718 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
4719
Alexander Duyck68d480c2009-10-05 06:33:08 +00004720 vmolr |= rd32(E1000_VMOLR(vfn)) &
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004721 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
Alexander Duyck45693bc2016-01-06 23:10:39 -08004722
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004723 /* enable Rx jumbo frames, restrict as needed to support build_skb */
Alexander Duyck45693bc2016-01-06 23:10:39 -08004724 vmolr &= ~E1000_VMOLR_RLPML_MASK;
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004725#if (PAGE_SIZE < 8192)
4726 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4727 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
4728 else
4729#endif
4730 vmolr |= MAX_JUMBO_FRAME_SIZE;
4731 vmolr |= E1000_VMOLR_LPE;
Alexander Duyck45693bc2016-01-06 23:10:39 -08004732
Alexander Duyck68d480c2009-10-05 06:33:08 +00004733 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck45693bc2016-01-06 23:10:39 -08004734
Alexander Duyck28fc06f2009-07-23 18:08:54 +00004735 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004736}
4737
Greg Rose13800462010-11-06 02:08:26 +00004738static void igb_check_wvbr(struct igb_adapter *adapter)
4739{
4740 struct e1000_hw *hw = &adapter->hw;
4741 u32 wvbr = 0;
4742
4743 switch (hw->mac.type) {
4744 case e1000_82576:
4745 case e1000_i350:
Carolyn Wyborny81ad8072014-04-11 01:46:13 +00004746 wvbr = rd32(E1000_WVBR);
4747 if (!wvbr)
Greg Rose13800462010-11-06 02:08:26 +00004748 return;
4749 break;
4750 default:
4751 break;
4752 }
4753
4754 adapter->wvbr |= wvbr;
4755}
4756
4757#define IGB_STAGGERED_QUEUE_OFFSET 8
4758
4759static void igb_spoof_check(struct igb_adapter *adapter)
4760{
4761 int j;
4762
4763 if (!adapter->wvbr)
4764 return;
4765
Carolyn Wyborny9005df32014-04-11 01:45:34 +00004766 for (j = 0; j < adapter->vfs_allocated_count; j++) {
Jacob Kellera51d8c22016-04-13 16:08:28 -07004767 if (adapter->wvbr & BIT(j) ||
4768 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
Greg Rose13800462010-11-06 02:08:26 +00004769 dev_warn(&adapter->pdev->dev,
4770 "Spoof event(s) detected on VF %d\n", j);
4771 adapter->wvbr &=
Jacob Kellera51d8c22016-04-13 16:08:28 -07004772 ~(BIT(j) |
4773 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
Greg Rose13800462010-11-06 02:08:26 +00004774 }
4775 }
4776}
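/* Editor's sketch: WVBR reports spoof events with one bit per VF plus a
 * second bank offset by IGB_STAGGERED_QUEUE_OFFSET (8), so VF j is tested
 * and cleared above through the mask:
 *
 *	BIT(j) | BIT(j + IGB_STAGGERED_QUEUE_OFFSET)
 *
 * e.g. VF 3 -> bits 3 and 11.
 */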
4777
Auke Kok9d5c8242008-01-24 02:22:38 -08004778/* Need to wait a few seconds after link up to get diagnostic information from
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004779 * the phy
4780 */
Kees Cook26566ea2017-10-16 17:29:35 -07004781static void igb_update_phy_info(struct timer_list *t)
Auke Kok9d5c8242008-01-24 02:22:38 -08004782{
Kees Cook26566ea2017-10-16 17:29:35 -07004783 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08004784 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08004785}
4786
4787/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004788 * igb_has_link - check shared code for link and determine up/down
4789 * @adapter: pointer to driver private info
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004790 **/
Nick Nunley31455352010-02-17 01:01:21 +00004791bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004792{
4793 struct e1000_hw *hw = &adapter->hw;
4794 bool link_active = false;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004795
4796 /* get_link_status is set on LSC (link status) interrupt or
4797 * rx sequence error interrupt. get_link_status will stay
4798 * false until the e1000_check_for_link establishes link
4799 * for copper adapters ONLY
4800 */
4801 switch (hw->phy.media_type) {
4802 case e1000_media_type_copper:
Akeem G Abodunrine5c33702013-06-06 01:31:09 +00004803 if (!hw->mac.get_link_status)
4804 return true;
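		/* fall through */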
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004805 case e1000_media_type_internal_serdes:
Akeem G Abodunrine5c33702013-06-06 01:31:09 +00004806 hw->mac.ops.check_for_link(hw);
4807 link_active = !hw->mac.get_link_status;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004808 break;
4809 default:
4810 case e1000_media_type_unknown:
4811 break;
4812 }
4813
Akeem G Abodunrinaa9b8cc2013-08-28 02:22:43 +00004814 if (((hw->mac.type == e1000_i210) ||
4815 (hw->mac.type == e1000_i211)) &&
4816 (hw->phy.id == I210_I_PHY_ID)) {
4817 if (!netif_carrier_ok(adapter->netdev)) {
4818 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4819 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
4820 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
4821 adapter->link_check_timeout = jiffies;
4822 }
4823 }
4824
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004825 return link_active;
4826}
4827
Stefan Assmann563988d2011-04-05 04:27:15 +00004828static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
4829{
4830 bool ret = false;
4831 u32 ctrl_ext, thstat;
4832
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00004833 /* check for thermal sensor event on i350 copper only */
Stefan Assmann563988d2011-04-05 04:27:15 +00004834 if (hw->mac.type == e1000_i350) {
4835 thstat = rd32(E1000_THSTAT);
4836 ctrl_ext = rd32(E1000_CTRL_EXT);
4837
4838 if ((hw->phy.media_type == e1000_media_type_copper) &&
Akeem G. Abodunrin5c17a202013-01-29 10:15:31 +00004839 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
Stefan Assmann563988d2011-04-05 04:27:15 +00004840 ret = !!(thstat & event);
Stefan Assmann563988d2011-04-05 04:27:15 +00004841 }
4842
4843 return ret;
4844}
4845
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004846/**
Carolyn Wyborny1516f0a2014-07-09 04:55:45 +00004847 * igb_check_lvmmc - check for malformed packets received
4848 *		   and indicated in the LVMMC register
4849 * @adapter: pointer to adapter
4850 **/
4851static void igb_check_lvmmc(struct igb_adapter *adapter)
4852{
4853 struct e1000_hw *hw = &adapter->hw;
4854 u32 lvmmc;
4855
4856 lvmmc = rd32(E1000_LVMMC);
4857 if (lvmmc) {
4858 if (unlikely(net_ratelimit())) {
4859 netdev_warn(adapter->netdev,
4860 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
4861 lvmmc);
4862 }
4863 }
4864}
4865
4866/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004867 * igb_watchdog - Timer Call-back
4868 * @data: pointer to adapter cast into an unsigned long
Auke Kok9d5c8242008-01-24 02:22:38 -08004869 **/
Kees Cook26566ea2017-10-16 17:29:35 -07004870static void igb_watchdog(struct timer_list *t)
Auke Kok9d5c8242008-01-24 02:22:38 -08004871{
Kees Cook26566ea2017-10-16 17:29:35 -07004872 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
Auke Kok9d5c8242008-01-24 02:22:38 -08004873 /* Do the rest outside of interrupt context */
4874 schedule_work(&adapter->watchdog_task);
4875}
4876
4877static void igb_watchdog_task(struct work_struct *work)
4878{
4879 struct igb_adapter *adapter = container_of(work,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004880 struct igb_adapter,
4881 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08004882 struct e1000_hw *hw = &adapter->hw;
Koki Sanagic0ba4772013-01-16 11:05:53 +00004883 struct e1000_phy_info *phy = &hw->phy;
Auke Kok9d5c8242008-01-24 02:22:38 -08004884 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00004885 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07004886 int i;
Carolyn Wyborny56cec242013-10-17 05:36:26 +00004887 u32 connsw;
Takuma Uebab72f3f72015-12-31 14:58:14 +09004888 u16 phy_data, retry_count = 20;
Auke Kok9d5c8242008-01-24 02:22:38 -08004889
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004890 link = igb_has_link(adapter);
Akeem G Abodunrinaa9b8cc2013-08-28 02:22:43 +00004891
4892 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
4893 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4894 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4895 else
4896 link = false;
4897 }
4898
Carolyn Wyborny56cec242013-10-17 05:36:26 +00004899 /* Force link down if we have fiber to swap to */
4900 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
4901 if (hw->phy.media_type == e1000_media_type_copper) {
4902 connsw = rd32(E1000_CONNSW);
4903 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
4904 link = 0;
4905 }
4906 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004907 if (link) {
Carolyn Wyborny2bdfc4e2013-10-17 05:23:01 +00004908 /* Perform a reset if the media type changed. */
4909 if (hw->dev_spec._82575.media_changed) {
4910 hw->dev_spec._82575.media_changed = false;
4911 adapter->flags |= IGB_FLAG_MEDIA_RESET;
4912 igb_reset(adapter);
4913 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00004914 /* Cancel scheduled suspend requests. */
4915 pm_runtime_resume(netdev->dev.parent);
4916
Auke Kok9d5c8242008-01-24 02:22:38 -08004917 if (!netif_carrier_ok(netdev)) {
4918 u32 ctrl;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00004919
Alexander Duyck330a6d62009-10-27 23:51:35 +00004920 hw->mac.ops.get_speed_and_duplex(hw,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004921 &adapter->link_speed,
4922 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08004923
4924 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08004925 /* Links status message must follow this format */
Carolyn Wybornyc75c4ed2014-04-11 01:45:17 +00004926 netdev_info(netdev,
4927 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00004928 netdev->name,
4929 adapter->link_speed,
4930 adapter->link_duplex == FULL_DUPLEX ?
Jeff Kirsher876d2d62011-10-21 20:01:34 +00004931 "Full" : "Half",
4932 (ctrl & E1000_CTRL_TFCE) &&
4933 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
4934 (ctrl & E1000_CTRL_RFCE) ? "RX" :
4935 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
Auke Kok9d5c8242008-01-24 02:22:38 -08004936
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00004937 /* disable EEE if enabled */
4938 if ((adapter->flags & IGB_FLAG_EEE) &&
4939 (adapter->link_duplex == HALF_DUPLEX)) {
4940 dev_info(&adapter->pdev->dev,
4941 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
4942 adapter->hw.dev_spec._82575.eee_disable = true;
4943 adapter->flags &= ~IGB_FLAG_EEE;
4944 }
4945
Koki Sanagic0ba4772013-01-16 11:05:53 +00004946 /* check if SmartSpeed worked */
4947 igb_check_downshift(hw);
4948 if (phy->speed_downgraded)
4949 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4950
Stefan Assmann563988d2011-04-05 04:27:15 +00004951 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00004952 if (igb_thermal_sensor_event(hw,
Carolyn Wybornyd34a15a2014-04-11 01:45:23 +00004953 E1000_THSTAT_LINK_THROTTLE))
Carolyn Wybornyc75c4ed2014-04-11 01:45:17 +00004954 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
Stefan Assmann563988d2011-04-05 04:27:15 +00004955
Emil Tantilovd07f3e32010-03-23 18:34:57 +00004956 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08004957 adapter->tx_timeout_factor = 1;
4958 switch (adapter->link_speed) {
4959 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08004960 adapter->tx_timeout_factor = 14;
4961 break;
4962 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08004963 /* maybe add some timeout factor ? */
4964 break;
4965 }
4966
Takuma Uebab72f3f72015-12-31 14:58:14 +09004967 if (adapter->link_speed != SPEED_1000)
4968 goto no_wait;
4969
4970 /* wait for Remote receiver status OK */
4971retry_read_status:
4972 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
4973 &phy_data)) {
4974 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
4975 retry_count) {
4976 msleep(100);
4977 retry_count--;
4978 goto retry_read_status;
4979 } else if (!retry_count) {
4980 dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
4981 }
4982 } else {
4983 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
4984 }
4985no_wait:
Auke Kok9d5c8242008-01-24 02:22:38 -08004986 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08004987
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004988 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00004989 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004990
Alexander Duyck4b1a9872009-02-06 23:19:50 +00004991 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08004992 if (!test_bit(__IGB_DOWN, &adapter->state))
4993 mod_timer(&adapter->phy_info_timer,
4994 round_jiffies(jiffies + 2 * HZ));
4995 }
4996 } else {
4997 if (netif_carrier_ok(netdev)) {
4998 adapter->link_speed = 0;
4999 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00005000
5001 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00005002 if (igb_thermal_sensor_event(hw,
5003 E1000_THSTAT_PWR_DOWN)) {
Carolyn Wybornyc75c4ed2014-04-11 01:45:17 +00005004 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00005005 }
Stefan Assmann563988d2011-04-05 04:27:15 +00005006
Alexander Duyck527d47c2008-11-27 00:21:39 -08005007 /* Links status message must follow this format */
Carolyn Wybornyc75c4ed2014-04-11 01:45:17 +00005008 netdev_info(netdev, "igb: %s NIC Link is Down\n",
Alexander Duyck527d47c2008-11-27 00:21:39 -08005009 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08005010 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00005011
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005012 igb_ping_all_vfs(adapter);
5013
Alexander Duyck4b1a9872009-02-06 23:19:50 +00005014 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08005015 if (!test_bit(__IGB_DOWN, &adapter->state))
5016 mod_timer(&adapter->phy_info_timer,
5017 round_jiffies(jiffies + 2 * HZ));
Yan, Zheng749ab2c2012-01-04 20:23:37 +00005018
Carolyn Wyborny56cec242013-10-17 05:36:26 +00005019 /* link is down, time to check for alternate media */
5020 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5021 igb_check_swap_media(adapter);
5022 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5023 schedule_work(&adapter->reset_task);
5024 /* return immediately */
5025 return;
5026 }
5027 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00005028 pm_schedule_suspend(netdev->dev.parent,
5029 MSEC_PER_SEC * 5);
Carolyn Wyborny56cec242013-10-17 05:36:26 +00005030
5031 /* also check for alternate media here */
5032 } else if (!netif_carrier_ok(netdev) &&
5033 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5034 igb_check_swap_media(adapter);
5035 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5036 schedule_work(&adapter->reset_task);
5037 /* return immediately */
5038 return;
5039 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005040 }
5041 }
5042
Eric Dumazet12dcd862010-10-15 17:27:10 +00005043 spin_lock(&adapter->stats64_lock);
Benjamin Poirier81e3f642017-05-16 15:55:16 -07005044 igb_update_stats(adapter);
Eric Dumazet12dcd862010-10-15 17:27:10 +00005045 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08005046
Alexander Duyckdbabb062009-11-12 18:38:16 +00005047 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00005048 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00005049 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005050 /* We've lost link, so the controller stops DMA,
5051 * but we've got queued Tx work that's never going
5052 * to get done, so reset controller to flush Tx.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005053 * (Do the reset outside of interrupt context).
5054 */
Alexander Duyckdbabb062009-11-12 18:38:16 +00005055 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5056 adapter->tx_timeout_count++;
5057 schedule_work(&adapter->reset_task);
5058 /* return immediately since reset is imminent */
5059 return;
5060 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005061 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005062
Alexander Duyckdbabb062009-11-12 18:38:16 +00005063 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005064 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00005065 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00005066
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005067 /* Cause software interrupt to ensure Rx ring is cleaned */
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00005068 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Alexander Duyck047e0032009-10-27 15:49:27 +00005069 u32 eics = 0;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00005070
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00005071 for (i = 0; i < adapter->num_q_vectors; i++)
5072 eics |= adapter->q_vector[i]->eims_value;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07005073 wr32(E1000_EICS, eics);
5074 } else {
5075 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5076 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005077
Greg Rose13800462010-11-06 02:08:26 +00005078 igb_spoof_check(adapter);
Matthew Vickfc580752012-12-13 07:20:35 +00005079 igb_ptp_rx_hang(adapter);
Jacob Kellere5f36ad2017-05-03 10:29:03 -07005080 igb_ptp_tx_hang(adapter);
Greg Rose13800462010-11-06 02:08:26 +00005081
Carolyn Wyborny1516f0a2014-07-09 04:55:45 +00005082 /* Check LVMMC register on i350/i354 only */
5083 if ((adapter->hw.mac.type == e1000_i350) ||
5084 (adapter->hw.mac.type == e1000_i354))
5085 igb_check_lvmmc(adapter);
5086
Auke Kok9d5c8242008-01-24 02:22:38 -08005087 /* Reset the timer */
Akeem G Abodunrinaa9b8cc2013-08-28 02:22:43 +00005088 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5089 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5090 mod_timer(&adapter->watchdog_timer,
5091 round_jiffies(jiffies + HZ));
5092 else
5093 mod_timer(&adapter->watchdog_timer,
5094 round_jiffies(jiffies + 2 * HZ));
5095 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005096}
5097
5098enum latency_range {
5099 lowest_latency = 0,
5100 low_latency = 1,
5101 bulk_latency = 2,
5102 latency_invalid = 255
5103};
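/*
 * How these ranges are consumed (see igb_set_itr() below): lowest_latency
 * selects IGB_70K_ITR (~70,000 ints/sec), low_latency selects IGB_20K_ITR
 * (~20,000 ints/sec) and bulk_latency selects IGB_4K_ITR (~4,000 ints/sec).
 */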

/**
 *  igb_update_ring_itr - update the dynamic ITR value based on packet size
 *  @q_vector: pointer to q_vector
 *
 *  Stores a new ITR value based strictly on packet size.  This
 *  algorithm is less sophisticated than that used in igb_update_itr,
 *  due to the difficulty of synchronizing statistics across multiple
 *  receive rings.  The divisors and thresholds used by this function
 *  were determined based on theoretical maximum wire speed and testing
 *  data, in order to minimize response time while increasing bulk
 *  throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE:  This function is called only when operating in a multiqueue
 *  receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

/**
 *  igb_update_itr - update the dynamic ITR value based on statistics
 *  @q_vector: pointer to q_vector
 *  @ring_container: ring info to update the itr for
 *
 *  Stores a new ITR value based on packets and byte
 *  counts during the last interrupt.  The advantage of per interrupt
 *  computation is faster updates and more accurate ITR for the current
 *  traffic pattern.  Constants in this function were computed
 *  based on theoretical maximum wire speed and thresholds were set based
 *  on testing data as well as attempting to minimize response time
 *  while increasing bulk throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE:  These calculations are only valid when operating in a single-
 *  queue environment.
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				itrval = bulk_latency;
			else if ((packets > 35))
				itrval = lowest_latency;
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     igb_ipv6_csum_is_sctp(skb))) {
			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGB_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

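/*
 * Illustrative expansion (assuming single-bit masks, as in the callers
 * below): IGB_SET_FLAG moves a bit from its position in _input to its
 * position in _result with a compile-time multiply or divide, e.g. a flag
 * at bit 0 mapped to a result bit at bit 30 expands to
 * (u32)(_input & BIT(0)) * (BIT(30) / BIT(0)).
 */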
5473static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
Alexander Duycke032afc2011-08-26 07:44:48 +00005474{
5475 /* set type for advanced descriptor with frame checksum insertion */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00005476 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5477 E1000_ADVTXD_DCMD_DEXT |
5478 E1000_ADVTXD_DCMD_IFCS;
Alexander Duycke032afc2011-08-26 07:44:48 +00005479
5480 /* set HW vlan bit if vlan is present */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00005481 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5482 (E1000_ADVTXD_DCMD_VLE));
Alexander Duycke032afc2011-08-26 07:44:48 +00005483
5484 /* set segmentation bits for TSO */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00005485 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5486 (E1000_ADVTXD_DCMD_TSE));
5487
5488 /* set timestamp bit if present */
5489 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5490 (E1000_ADVTXD_MAC_TSTAMP));
5491
5492 /* insert frame checksum */
5493 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
Alexander Duycke032afc2011-08-26 07:44:48 +00005494
5495 return cmd_type;
5496}
5497
Alexander Duyck7af40ad92011-08-26 07:45:15 +00005498static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5499 union e1000_adv_tx_desc *tx_desc,
5500 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00005501{
5502 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5503
Alexander Duyck1d9daf42012-11-13 04:03:23 +00005504 /* 82575 requires a unique index per ring */
5505 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00005506 olinfo_status |= tx_ring->reg_idx << 4;
5507
5508 /* insert L4 checksum */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00005509 olinfo_status |= IGB_SET_FLAG(tx_flags,
5510 IGB_TX_FLAGS_CSUM,
5511 (E1000_TXD_POPTS_TXSM << 8));
Alexander Duycke032afc2011-08-26 07:44:48 +00005512
Alexander Duyck1d9daf42012-11-13 04:03:23 +00005513 /* insert IPv4 checksum */
5514 olinfo_status |= IGB_SET_FLAG(tx_flags,
5515 IGB_TX_FLAGS_IPV4,
5516 (E1000_TXD_POPTS_IXSM << 8));
Alexander Duycke032afc2011-08-26 07:44:48 +00005517
Alexander Duyck7af40ad92011-08-26 07:45:15 +00005518 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00005519}
5520
David S. Miller6f19e122014-08-28 01:39:31 -07005521static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5522{
5523 struct net_device *netdev = tx_ring->netdev;
5524
5525 netif_stop_subqueue(netdev, tx_ring->queue_index);
5526
5527 /* Herbert's original patch had:
5528 * smp_mb__after_netif_stop_queue();
5529 * but since that doesn't exist yet, just open code it.
5530 */
5531 smp_mb();
5532
5533 /* We need to check again in a case another CPU has just
5534 * made room available.
5535 */
5536 if (igb_desc_unused(tx_ring) < size)
5537 return -EBUSY;
5538
5539 /* A reprieve! */
5540 netif_wake_subqueue(netdev, tx_ring->queue_index);
5541
5542 u64_stats_update_begin(&tx_ring->tx_syncp2);
5543 tx_ring->tx_stats.restart_queue2++;
5544 u64_stats_update_end(&tx_ring->tx_syncp2);
5545
5546 return 0;
5547}
5548
5549static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5550{
5551 if (igb_desc_unused(tx_ring) >= size)
5552 return 0;
5553 return __igb_maybe_stop_tx(tx_ring, size);
5554}
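/*
 * Usage sketch (hedged): igb_tx_map() below calls
 * igb_maybe_stop_tx(tx_ring, DESC_NEEDED) after queueing each frame, so the
 * subqueue is stopped while there is still room for a worst-case follow-up
 * frame; igb_xmit_frame_ring() performs the matching up-front check with
 * "count + 3" descriptors before it touches the ring at all.
 */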

static int igb_tx_map(struct igb_ring *tx_ring,
		      struct igb_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGB_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}
	return 0;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
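/*
 * Note on the "cmd_type ^ size" writes above (hedged explanation, inferred
 * from the descriptor layout rather than stated in the code): cmd_type keeps
 * the low DTALEN bits clear, so XORing in a buffer length is a cheap way to
 * combine the command bits and the length in a single descriptor word; only
 * the final descriptor ORs in size together with IGB_TXD_DCMD (the RS and
 * EOP bits mentioned in the comment above).
 */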

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		if (adapter->tstamp_config.tx_type & HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGB_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
			if (adapter->hw.mac.type == e1000_82576)
				schedule_work(&adapter->ptp_tx_work);
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	skb_tx_timestamp(skb);

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	if (igb_tx_map(tx_ring, first, hdr_len))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		if (adapter->hw.mac.type == e1000_82576)
			cancel_work_sync(&adapter->ptp_tx_work);
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
	}

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 *  igb_tx_timeout - Respond to a Tx Hang
 *  @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 *  igb_get_stats64 - Get System Network Statistics
 *  @netdev: network interface device structure
 *  @stats: rtnl_link_stats64 pointer
 **/
static void igb_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}

/**
 *  igb_change_mtu - Change the Maximum Transfer Unit
 *  @netdev: network interface device structure
 *  @new_mtu: new value for maximum frame size
 *
 *  Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
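/*
 * Example (illustrative arithmetic only): a standard 1500-byte MTU gives
 * max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522;
 * the clamp above to ETH_FRAME_LEN + ETH_FCS_LEN (1514 + 4 = 1518) only
 * takes effect for smaller MTUs.
 */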
5902
5903/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005904 * igb_update_stats - Update the board statistics counters
5905 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08005906 **/
Benjamin Poirier81e3f642017-05-16 15:55:16 -07005907void igb_update_stats(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08005908{
Benjamin Poirier81e3f642017-05-16 15:55:16 -07005909 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
Auke Kok9d5c8242008-01-24 02:22:38 -08005910 struct e1000_hw *hw = &adapter->hw;
5911 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00005912 u32 reg, mpc;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00005913 int i;
5914 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005915 unsigned int start;
5916 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005917
Jeff Kirsherb980ac12013-02-23 07:29:56 +00005918 /* Prevent stats update while adapter is being reset, or if the pci
Auke Kok9d5c8242008-01-24 02:22:38 -08005919 * connection is down.
5920 */
5921 if (adapter->link_speed == 0)
5922 return;
5923 if (pci_channel_offline(pdev))
5924 return;
5925
Alexander Duyck3f9c0162009-10-27 23:48:12 +00005926 bytes = 0;
5927 packets = 0;
Akeem G Abodunrin7f901282013-06-27 09:10:23 +00005928
5929 rcu_read_lock();
Alexander Duyck3f9c0162009-10-27 23:48:12 +00005930 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00005931 struct igb_ring *ring = adapter->rx_ring[i];
Todd Fujinakae66c0832014-04-08 05:36:15 +00005932 u32 rqdpc = rd32(E1000_RQDPC(i));
5933 if (hw->mac.type >= e1000_i210)
5934 wr32(E1000_RQDPC(i), 0);
Eric Dumazet12dcd862010-10-15 17:27:10 +00005935
Alexander Duyckae1c07a2012-08-08 05:23:22 +00005936 if (rqdpc) {
5937 ring->rx_stats.drops += rqdpc;
5938 net_stats->rx_fifo_errors += rqdpc;
5939 }
Eric Dumazet12dcd862010-10-15 17:27:10 +00005940
5941 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07005942 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
Eric Dumazet12dcd862010-10-15 17:27:10 +00005943 _bytes = ring->rx_stats.bytes;
5944 _packets = ring->rx_stats.packets;
Eric W. Biederman57a77442014-03-13 21:26:42 -07005945 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
Eric Dumazet12dcd862010-10-15 17:27:10 +00005946 bytes += _bytes;
5947 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00005948 }
5949
Alexander Duyck128e45e2009-11-12 18:37:38 +00005950 net_stats->rx_bytes = bytes;
5951 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00005952
5953 bytes = 0;
5954 packets = 0;
5955 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00005956 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00005957 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07005958 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
Eric Dumazet12dcd862010-10-15 17:27:10 +00005959 _bytes = ring->tx_stats.bytes;
5960 _packets = ring->tx_stats.packets;
Eric W. Biederman57a77442014-03-13 21:26:42 -07005961 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
Eric Dumazet12dcd862010-10-15 17:27:10 +00005962 bytes += _bytes;
5963 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00005964 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00005965 net_stats->tx_bytes = bytes;
5966 net_stats->tx_packets = packets;
Akeem G Abodunrin7f901282013-06-27 09:10:23 +00005967 rcu_read_unlock();
Alexander Duyck3f9c0162009-10-27 23:48:12 +00005968
5969 /* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);

		/* this stat has invalid values on i210/i211 */
		if ((hw->mac.type != e1000_i210) &&
		    (hw->mac.type != e1000_i211))
			adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

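/* Note (illustrative, not driver code): the adapter->stats counters
 * accumulated above are exposed to user space through ethtool
 * ("ethtool -S ethX"), while the net_stats fields populated from them
 * feed the standard interface counters ("ip -s link show ethX").
 */
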
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
	struct timespec64 ts;
	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);

	if (tsicr & TSINTR_SYS_WRAP) {
		event.type = PTP_CLOCK_PPS;
		if (adapter->ptp_caps.pps)
			ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_SYS_WRAP;
	}

	if (tsicr & E1000_TSICR_TXTS) {
		/* retrieve hardware timestamp */
		schedule_work(&adapter->ptp_tx_work);
		ack |= E1000_TSICR_TXTS;
	}

	if (tsicr & TSINTR_TT0) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[0].start,
				    adapter->perout[0].period);
		/* u32 conversion of tv_sec is safe until y2106 */
		wr32(E1000_TRGTTIML0, ts.tv_nsec);
		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
		tsauxc = rd32(E1000_TSAUXC);
		tsauxc |= TSAUXC_EN_TT0;
		wr32(E1000_TSAUXC, tsauxc);
		adapter->perout[0].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= TSINTR_TT0;
	}

	if (tsicr & TSINTR_TT1) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[1].start,
				    adapter->perout[1].period);
		wr32(E1000_TRGTTIML1, ts.tv_nsec);
		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
		tsauxc = rd32(E1000_TSAUXC);
		tsauxc |= TSAUXC_EN_TT1;
		wr32(E1000_TSAUXC, tsauxc);
		adapter->perout[1].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= TSINTR_TT1;
	}

	if (tsicr & TSINTR_AUTT0) {
		nsec = rd32(E1000_AUXSTMPL0);
		sec = rd32(E1000_AUXSTMPH0);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = sec * 1000000000ULL + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_AUTT0;
	}

	if (tsicr & TSINTR_AUTT1) {
		nsec = rd32(E1000_AUXSTMPL1);
		sec = rd32(E1000_AUXSTMPH1);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = sec * 1000000000ULL + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_AUTT1;
	}

	/* acknowledge the interrupts */
	wr32(E1000_TSICR, ack);
}

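/* Example (illustrative, not part of this driver): the TT0/TT1 reload
 * logic above services periodic outputs that user space arms through
 * the PTP character device, e.g. a 1 Hz pulse on channel 0:
 *
 *	struct ptp_perout_request req = { 0 };
 *
 *	req.index = 0;
 *	req.period.sec = 1;
 *	ioctl(ptp_fd, PTP_PEROUT_REQUEST, &req);
 *
 * where ptp_fd comes from open("/dev/ptp0", O_RDWR).
 */
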
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also an indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event.
		 */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_tx_dca(struct igb_adapter *adapter,
			      struct igb_ring *tx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);

	if (hw->mac.type != e1000_82575)
		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
		  E1000_DCA_TXCTRL_DATA_RRO_EN |
		  E1000_DCA_TXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
}

static void igb_update_rx_dca(struct igb_adapter *adapter,
			      struct igb_ring *rx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);

	if (hw->mac.type != e1000_82575)
		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
		  E1000_DCA_RXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
}

static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring)
		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);

	if (q_vector->rx.ring)
		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model
			 */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];

	eth_zero_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	/* By default spoof check is enabled for all VFs */
	adapter->vf_data[vf].spoofchk_enabled = true;

	return 0;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/* if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;

			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));

		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, vlvf_mask, i;

	/* create mask for VF and other pools */
	pool_mask = E1000_VLVF_POOLSEL_MASK;
	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);

	/* drop PF from pool bits */
	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
			  adapter->vfs_allocated_count);

	/* Find the vlan filter for this id */
	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
		u32 vlvf = rd32(E1000_VLVF(i));
		u32 vfta_mask, vid, vfta;

		/* remove the vf from the pool */
		if (!(vlvf & vlvf_mask))
			continue;

		/* clear out bit from VLVF */
		vlvf ^= vlvf_mask;

		/* if other pools are present, just remove ourselves */
		if (vlvf & pool_mask)
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (vlvf & E1000_VLVF_POOLSEL_MASK)
			goto update_vlvf;

		vid = vlvf & E1000_VLVF_VLANID_MASK;
		vfta_mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = adapter->shadow_vfta[vid / 32];
		if (vfta & vfta_mask)
			hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
update_vlvf:
		/* clear pool selection enable */
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			vlvf &= E1000_VLVF_POOLSEL_MASK;
		else
			vlvf = 0;
update_vlvfb:
		/* clear pool bits */
		wr32(E1000_VLVF(i), vlvf);
	}
}

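/* Note (illustrative): VFTA is a 4096-bit VLAN membership bitmap stored
 * as 128 32-bit words, indexed as word = vid / 32, bit = BIT(vid % 32).
 * The VLVF array layers per-pool (PF/VF) membership on top of it: each
 * entry pairs a VLAN id with a pool-select bitmask starting at
 * E1000_VLVF_POOLSEL_SHIFT, which is what the function above edits.
 */
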
static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
{
	u32 vlvf;
	int idx;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the VLAN id in the VLVF entries */
	for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
		vlvf = rd32(E1000_VLVF(idx));
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	return idx;
}

static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 bits, pf_id;
	int idx;

	idx = igb_find_vlvf_entry(hw, vid);
	if (!idx)
		return;

	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
	bits &= rd32(E1000_VLVF(idx));

	/* Disable the filter so this falls into the default pool. */
	if (!bits) {
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			wr32(E1000_VLVF(idx), BIT(pf_id));
		else
			wr32(E1000_VLVF(idx), 0);
	}
}

static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
			   bool add, u32 vf)
{
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* If VLAN overlaps with one the PF is currently monitoring make
	 * sure that we are able to allocate a VLVF entry.  This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = igb_vfta_set(hw, vid, pf_id, true, false);
		if (err)
			return err;
	}

	err = igb_vfta_set(hw, vid, vf, add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_update_pf_vlvf(adapter, vid);

	return err;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
				u16 vlan, u8 qos)
{
	int err;

	err = igb_set_vf_vlan(adapter, vlan, true, vf);
	if (err)
		return err;

	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vlan);

	/* revoke access to previous VLAN */
	if (vlan != adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = vlan;
	adapter->vf_data[vf].pf_qos = qos;
	igb_set_vf_vlan_strip(adapter, vf, true);
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

	return err;
}

static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{
	/* Restore tagless access via VLAN 0 */
	igb_set_vf_vlan(adapter, 0, true, vf);

	igb_set_vmvir(adapter, 0, vf);
	igb_set_vmolr(adapter, vf, true);

	/* Remove any PF assigned VLAN */
	if (adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = 0;
	adapter->vf_data[vf].pf_qos = 0;
	igb_set_vf_vlan_strip(adapter, vf, false);

	return 0;
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
			       u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
			       igb_disable_port_vlan(adapter, vf);
}

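/* Example (illustrative): igb_ndo_set_vf_vlan() is reached from the
 * IFLA_VF_VLAN netlink path, which iproute2 exposes as, e.g.:
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *	ip link set dev eth0 vf 0 vlan 0	(clear the port VLAN)
 *
 * Only 802.1Q requests reach the enable/disable helpers; 802.1ad
 * (QinQ) requests are rejected above with -EPROTONOSUPPORT.
 */
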
static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
	int ret;

	if (adapter->vf_data[vf].pf_vlan)
		return -1;

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
	if (!ret)
		igb_set_vf_vlan_strip(adapter, vf, !!vid);
	return ret;
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	/* clear flags - except flag that indicates PF has set the MAC */
	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
	vf_data->last_nack = jiffies;

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
	igb_set_vmvir(adapter, vf_data->pf_vlan |
			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* clear mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		eth_zero_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_set_vf_mac(adapter, vf, vf_mac);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | BIT(vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | BIT(vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
	}
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static void igb_flush_mac_table(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.rar_entry_count; i++) {
		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		adapter->mac_table[i].queue = 0;
		igb_rar_set_index(adapter, i);
	}
}

static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	/* do not count rar entries reserved for VFs MAC addresses */
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i, count = 0;

	for (i = 0; i < rar_entries; i++) {
		/* do not count default entries */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
			continue;

		/* do not count "in use" entries for different queues */
		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
		    (adapter->mac_table[i].queue != queue))
			continue;

		count++;
	}

	return count;
}

/* Set default MAC address for the PF in the first RAR entry */
static void igb_set_default_mac_filter(struct igb_adapter *adapter)
{
	struct igb_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->queue = adapter->vfs_allocated_count;
	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;

	igb_rar_set_index(adapter, 0);
}

static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for the first empty entry in the MAC table.
	 * Do not touch entries at the end of the table reserved for the VF MAC
	 * addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
			continue;

		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE;

		igb_rar_set_index(adapter, i);
		return i;
	}

	return -ENOSPC;
}

static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for matching entry in the MAC table based on given address
	 * and queue.  Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
			continue;
		if (adapter->mac_table[i].queue != queue)
			continue;
		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
			continue;

		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		adapter->mac_table[i].queue = 0;

		igb_rar_set_index(adapter, i);
		return 0;
	}

	return -ENOENT;
}

static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return min_t(int, ret, 0);
}

static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return 0;
}

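/* Note (illustrative): the two callbacks above follow the add/del hook
 * signatures the networking core's unicast address sync expects;
 * elsewhere in this driver the rx mode handler hands them to the core
 * roughly as:
 *
 *	__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync);
 */
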
static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
				 const u32 info, const u8 *addr)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	struct list_head *pos;
	struct vf_mac_filter *entry = NULL;
	int ret = 0;

	switch (info) {
	case E1000_VF_MAC_FILTER_CLR:
		/* remove all unicast MAC filters related to the current VF */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				igb_del_mac_filter(adapter, entry->vf_mac, vf);
			}
		}
		break;
	case E1000_VF_MAC_FILTER_ADD:
		if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
			dev_warn(&pdev->dev,
				 "VF %d requested MAC filter but is administratively denied\n",
				 vf);
			return -EINVAL;
		}

		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC filter\n",
				 vf);
			return -EINVAL;
		}

		/* try to find empty slot in the list */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->free)
				break;
		}

		if (entry && entry->free) {
			entry->free = false;
			entry->vf = vf;
			ether_addr_copy(entry->vf_mac, addr);

			ret = igb_add_mac_filter(adapter, addr, vf);
			ret = min_t(int, ret, 0);
		} else {
			ret = -ENOSPC;
		}

		if (ret == -ENOSPC)
			dev_warn(&pdev->dev,
				 "VF %d has requested MAC filter but there is no space for it\n",
				 vf);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;

	/* The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int ret = 0;

	if (!info) {
		if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
				 vf);
			return -EINVAL;
		}

		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC\n",
				 vf);
			return -EINVAL;
		}

		ret = igb_set_vf_mac(adapter, vf, addr);
	} else {
		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
	}

	return ret;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		goto unlock;

	/* until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		/* unlocks mailbox */
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	/* unlocks mailbox */
	igb_write_mbx(hw, msgbuf, 1, vf);
	return;

unlock:
	igb_unlock_mbx(hw, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

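/* Note (illustrative): the PF<->VF mailbox messages handled above are
 * arrays of 32-bit words laid out roughly as:
 *
 *	msgbuf[0], bits 15:0  - command (E1000_VF_RESET, E1000_VF_SET_*)
 *	msgbuf[0], bits 31:16 - E1000_VT_MSGINFO plus ACK/NACK/CTS flags
 *	msgbuf[1..]           - payload (MAC address bytes, VLAN id, MTU)
 */
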
/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *  @set: boolean indicating if we are setting or clearing bits
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used;
 *  however, due to certain limitations in the hardware, it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 uta = set ? ~0 : 0;
	int i;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = hw->mac.uta_reg_count; i--;)
		array_wr32(E1000_UTA, i, uta);
}

/**
 *  igb_intr_msi - Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 *  igb_intr - Legacy Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write
	 */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 *  igb_poll - NAPI Rx polling callback
 *  @napi: napi polling structure
 *  @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector, budget);

	if (q_vector->rx.ring) {
		int cleaned = igb_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	igb_ring_irq_enable(q_vector);

	return 0;
}

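/* Illustrative NAPI poll skeleton (assumed pattern; igb_poll() above is
 * one concrete instance of it):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = my_clean_rx(budget);		// hypothetical helper
 *
 *		if (done >= budget)
 *			return budget;			// stay scheduled
 *		napi_complete_done(napi, done);
 *		my_reenable_irq();			// hypothetical helper
 *		return 0;
 *	}
 */
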
Patrick Ohly33af6bc2009-02-12 05:03:43 +00007296/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007297 * igb_clean_tx_irq - Reclaim resources after transmit completes
7298 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007299 * @napi_budget: Used to determine if we are in netpoll
Ben Hutchings49ce9c22012-07-10 10:56:00 +00007300 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007301 * returns true if ring is completely cleaned
Auke Kok9d5c8242008-01-24 02:22:38 -08007302 **/
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007303static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08007304{
Alexander Duyck047e0032009-10-27 15:49:27 +00007305 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00007306 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00007307 struct igb_tx_buffer *tx_buffer;
Alexander Duyckf4128782012-09-13 06:28:01 +00007308 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08007309 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00007310 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00007311 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08007312
Alexander Duyck13fde972011-10-05 13:35:24 +00007313 if (test_bit(__IGB_DOWN, &adapter->state))
7314 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08007315
Alexander Duyck06034642011-08-26 07:44:22 +00007316 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00007317 tx_desc = IGB_TX_DESC(tx_ring, i);
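	/* bias i by the ring size so that it reaches zero exactly at the
	 * wrap point; the loop below can then detect a wrap with a cheap
	 * !i test instead of comparing against tx_ring->count
	 */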
Alexander Duyck8542db02011-08-26 07:44:43 +00007318 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08007319
Alexander Duyckf4128782012-09-13 06:28:01 +00007320 do {
7321 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
Alexander Duyck8542db02011-08-26 07:44:43 +00007322
7323 /* if next_to_watch is not set then there is no work pending */
7324 if (!eop_desc)
7325 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00007326
Alexander Duyckf4128782012-09-13 06:28:01 +00007327 /* prevent any other reads prior to eop_desc */
Brian Kingc4cb9912017-11-17 11:05:47 -06007328 smp_rmb();
Alexander Duyckf4128782012-09-13 06:28:01 +00007329
Alexander Duyck13fde972011-10-05 13:35:24 +00007330 /* if DD is not set pending work has not been completed */
7331 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7332 break;
7333
Alexander Duyck8542db02011-08-26 07:44:43 +00007334 /* clear next_to_watch to prevent false hangs */
7335 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00007336
Alexander Duyckebe42d12011-08-26 07:45:09 +00007337 /* update the statistics for this packet */
7338 total_bytes += tx_buffer->bytecount;
7339 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00007340
Alexander Duyckebe42d12011-08-26 07:45:09 +00007341 /* free the skb */
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007342 napi_consume_skb(tx_buffer->skb, napi_budget);
Alexander Duyckebe42d12011-08-26 07:45:09 +00007343
7344 /* unmap skb header data */
7345 dma_unmap_single(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007346 dma_unmap_addr(tx_buffer, dma),
7347 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00007348 DMA_TO_DEVICE);
7349
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007350 /* clear tx_buffer data */
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007351 dma_unmap_len_set(tx_buffer, len, 0);
7352
Alexander Duyckebe42d12011-08-26 07:45:09 +00007353 /* clear last DMA location and unmap remaining buffers */
7354 while (tx_desc != eop_desc) {
Alexander Duyck13fde972011-10-05 13:35:24 +00007355 tx_buffer++;
7356 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08007357 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00007358 if (unlikely(!i)) {
7359 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00007360 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00007361 tx_desc = IGB_TX_DESC(tx_ring, 0);
7362 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00007363
7364 /* unmap any remaining paged data */
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007365 if (dma_unmap_len(tx_buffer, len)) {
Alexander Duyckebe42d12011-08-26 07:45:09 +00007366 dma_unmap_page(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007367 dma_unmap_addr(tx_buffer, dma),
7368 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00007369 DMA_TO_DEVICE);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007370 dma_unmap_len_set(tx_buffer, len, 0);
Alexander Duyckebe42d12011-08-26 07:45:09 +00007371 }
7372 }
7373
Alexander Duyckebe42d12011-08-26 07:45:09 +00007374 /* move us one more past the eop_desc for start of next pkt */
7375 tx_buffer++;
7376 tx_desc++;
7377 i++;
7378 if (unlikely(!i)) {
7379 i -= tx_ring->count;
7380 tx_buffer = tx_ring->tx_buffer_info;
7381 tx_desc = IGB_TX_DESC(tx_ring, 0);
7382 }
Alexander Duyckf4128782012-09-13 06:28:01 +00007383
7384 /* issue prefetch for next Tx descriptor */
7385 prefetch(tx_desc);
7386
7387 /* update budget accounting */
7388 budget--;
7389 } while (likely(budget));
Alexander Duyck0e014cb2008-12-26 01:33:18 -08007390
Eric Dumazetbdbc0632012-01-04 20:23:36 +00007391 netdev_tx_completed_queue(txring_txq(tx_ring),
7392 total_packets, total_bytes);
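	/* remove the -count bias applied above to restore a valid index */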
Alexander Duyck8542db02011-08-26 07:44:43 +00007393 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08007394 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00007395 u64_stats_update_begin(&tx_ring->tx_syncp);
7396 tx_ring->tx_stats.bytes += total_bytes;
7397 tx_ring->tx_stats.packets += total_packets;
7398 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00007399 q_vector->tx.total_bytes += total_bytes;
7400 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08007401
Alexander Duyck6d095fa2011-08-26 07:46:19 +00007402 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00007403 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00007404
Auke Kok9d5c8242008-01-24 02:22:38 -08007405		/* Detect a transmit hang in hardware; this serializes the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007406 * check with the clearing of time_stamp and movement of i
7407 */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00007408 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckf4128782012-09-13 06:28:01 +00007409 if (tx_buffer->next_to_watch &&
Alexander Duyck8542db02011-08-26 07:44:43 +00007410 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00007411 (adapter->tx_timeout_factor * HZ)) &&
7412 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08007413
Auke Kok9d5c8242008-01-24 02:22:38 -08007414 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00007415 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08007416 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07007417 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08007418 " TDH <%x>\n"
7419 " TDT <%x>\n"
7420 " next_to_use <%x>\n"
7421 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08007422 "buffer_info[next_to_clean]\n"
7423 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00007424 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08007425 " jiffies <%lx>\n"
7426 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07007427 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00007428 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00007429 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08007430 tx_ring->next_to_use,
7431 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00007432 tx_buffer->time_stamp,
Alexander Duyckf4128782012-09-13 06:28:01 +00007433 tx_buffer->next_to_watch,
Auke Kok9d5c8242008-01-24 02:22:38 -08007434 jiffies,
Alexander Duyckf4128782012-09-13 06:28:01 +00007435 tx_buffer->next_to_watch->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00007436 netif_stop_subqueue(tx_ring->netdev,
7437 tx_ring->queue_index);
7438
7439 /* we are about to reset, no point in enabling stuff */
7440 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08007441 }
7442 }
Alexander Duyck13fde972011-10-05 13:35:24 +00007443
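/* restart the queue only once enough descriptors are free for at least
 * two more worst-case frames, so the queue does not bounce between
 * stopped and running on every reclaimed descriptor
 */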
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00007444#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
Alexander Duyck13fde972011-10-05 13:35:24 +00007445 if (unlikely(total_packets &&
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007446 netif_carrier_ok(tx_ring->netdev) &&
7447 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00007448 /* Make sure that anybody stopping the queue after this
7449 * sees the new next_to_clean.
7450 */
7451 smp_mb();
7452 if (__netif_subqueue_stopped(tx_ring->netdev,
7453 tx_ring->queue_index) &&
7454 !(test_bit(__IGB_DOWN, &adapter->state))) {
7455 netif_wake_subqueue(tx_ring->netdev,
7456 tx_ring->queue_index);
7457
7458 u64_stats_update_begin(&tx_ring->tx_syncp);
7459 tx_ring->tx_stats.restart_queue++;
7460 u64_stats_update_end(&tx_ring->tx_syncp);
7461 }
7462 }
7463
7464 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08007465}
7466
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007467/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007468 * igb_reuse_rx_page - page flip buffer and store it back on the ring
7469 * @rx_ring: rx descriptor ring to store buffers on
7470 * @old_buff: donor buffer to have page reused
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007471 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007472 * Synchronizes page for reuse by the adapter
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007473 **/
7474static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7475 struct igb_rx_buffer *old_buff)
7476{
7477 struct igb_rx_buffer *new_buff;
7478 u16 nta = rx_ring->next_to_alloc;
7479
7480 new_buff = &rx_ring->rx_buffer_info[nta];
7481
7482 /* update, and store next to alloc */
7483 nta++;
7484 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7485
Alexander Duycke0142722017-02-06 18:27:26 -08007486 /* Transfer page from old buffer to new buffer.
7487 * Move each member individually to avoid possible store
7488 * forwarding stalls.
7489 */
7490 new_buff->dma = old_buff->dma;
7491 new_buff->page = old_buff->page;
7492 new_buff->page_offset = old_buff->page_offset;
7493 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007494}
7495
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007496static inline bool igb_page_is_reserved(struct page *page)
7497{
Michal Hocko2f064f32015-08-21 14:11:51 -07007498 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007499}
7500
Alexander Duycke0142722017-02-06 18:27:26 -08007501static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
Alexander Duyck74e238e2013-02-02 05:07:11 +00007502{
Alexander Duycke0142722017-02-06 18:27:26 -08007503 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7504 struct page *page = rx_buffer->page;
Alexander Duyckbd4171a2016-12-14 15:05:34 -08007505
Alexander Duyck74e238e2013-02-02 05:07:11 +00007506 /* avoid re-using remote pages */
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007507 if (unlikely(igb_page_is_reserved(page)))
Roman Gushchinbc16e472014-10-23 03:32:27 +00007508 return false;
7509
Alexander Duyck74e238e2013-02-02 05:07:11 +00007510#if (PAGE_SIZE < 8192)
7511 /* if we are only owner of page we can reuse it */
Alexander Duycke0142722017-02-06 18:27:26 -08007512 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
Alexander Duyck74e238e2013-02-02 05:07:11 +00007513 return false;
Alexander Duyck74e238e2013-02-02 05:07:11 +00007514#else
Alexander Duyck8649aae2017-02-06 18:27:03 -08007515#define IGB_LAST_OFFSET \
7516 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
Alexander Duyck74e238e2013-02-02 05:07:11 +00007517
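	/* stop reusing once the offset advances past the point where a
	 * full 2K buffer plus skb overhead no longer fits in the page
	 */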
Alexander Duyck8649aae2017-02-06 18:27:03 -08007518 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
Alexander Duyck74e238e2013-02-02 05:07:11 +00007519 return false;
Alexander Duyck74e238e2013-02-02 05:07:11 +00007520#endif
7521
Alexander Duyckbd4171a2016-12-14 15:05:34 -08007522 /* If we have drained the page fragment pool we need to update
7523 * the pagecnt_bias and page count so that we fully restock the
7524 * number of references the driver holds.
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007525 */
Alexander Duycke0142722017-02-06 18:27:26 -08007526 if (unlikely(!pagecnt_bias)) {
Alexander Duyckbd4171a2016-12-14 15:05:34 -08007527 page_ref_add(page, USHRT_MAX);
7528 rx_buffer->pagecnt_bias = USHRT_MAX;
7529 }
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007530
Alexander Duyck74e238e2013-02-02 05:07:11 +00007531 return true;
7532}
7533
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007534/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007535 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
7536 * @rx_ring: rx descriptor ring to transact packets on
7537 * @rx_buffer: buffer containing page to add
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007538 * @skb: sk_buff to place the data into
Alexander Duycke0142722017-02-06 18:27:26 -08007539 * @size: size of buffer to be added
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007540 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007541 * This function will add the data contained in rx_buffer->page to the skb.
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007542 **/
Alexander Duycke0142722017-02-06 18:27:26 -08007543static void igb_add_rx_frag(struct igb_ring *rx_ring,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007544 struct igb_rx_buffer *rx_buffer,
Alexander Duycke0142722017-02-06 18:27:26 -08007545 struct sk_buff *skb,
7546 unsigned int size)
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007547{
Alexander Duyck74e238e2013-02-02 05:07:11 +00007548#if (PAGE_SIZE < 8192)
Alexander Duyck8649aae2017-02-06 18:27:03 -08007549 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
Alexander Duyck74e238e2013-02-02 05:07:11 +00007550#else
Alexander Duycke3cdf682017-02-06 18:27:14 -08007551 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
7552 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
7553 SKB_DATA_ALIGN(size);
Alexander Duyck74e238e2013-02-02 05:07:11 +00007554#endif
Alexander Duycke0142722017-02-06 18:27:26 -08007555 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
7556 rx_buffer->page_offset, size, truesize);
7557#if (PAGE_SIZE < 8192)
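	/* flip the offset to the other half of the page for reuse */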
7558 rx_buffer->page_offset ^= truesize;
7559#else
7560 rx_buffer->page_offset += truesize;
7561#endif
7562}
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007563
Alexander Duycke0142722017-02-06 18:27:26 -08007564static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
7565 struct igb_rx_buffer *rx_buffer,
7566 union e1000_adv_rx_desc *rx_desc,
7567 unsigned int size)
7568{
7569 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7570#if (PAGE_SIZE < 8192)
7571 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7572#else
7573 unsigned int truesize = SKB_DATA_ALIGN(size);
7574#endif
7575 unsigned int headlen;
7576 struct sk_buff *skb;
7577
7578 /* prefetch first cache line of first page */
7579 prefetch(va);
7580#if L1_CACHE_BYTES < 128
7581 prefetch(va + L1_CACHE_BYTES);
7582#endif
7583
7584 /* allocate a skb to store the frags */
7585 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
7586 if (unlikely(!skb))
7587 return NULL;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007588
Alexander Duyckf56e7bb2015-04-22 21:49:17 -07007589 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
7590 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
7591 va += IGB_TS_HDR_LEN;
7592 size -= IGB_TS_HDR_LEN;
7593 }
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007594
Alexander Duycke0142722017-02-06 18:27:26 -08007595 /* Determine available headroom for copy */
7596 headlen = size;
7597 if (headlen > IGB_RX_HDR_LEN)
7598 headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
Alexander Duyckf56e7bb2015-04-22 21:49:17 -07007599
7600 /* align pull length to size of long to optimize memcpy performance */
Alexander Duycke0142722017-02-06 18:27:26 -08007601 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
Alexander Duyckf56e7bb2015-04-22 21:49:17 -07007602
7603 /* update all of the pointers */
Alexander Duycke0142722017-02-06 18:27:26 -08007604 size -= headlen;
7605 if (size) {
7606 skb_add_rx_frag(skb, 0, rx_buffer->page,
7607 (va + headlen) - page_address(rx_buffer->page),
7608 size, truesize);
7609#if (PAGE_SIZE < 8192)
7610 rx_buffer->page_offset ^= truesize;
7611#else
7612 rx_buffer->page_offset += truesize;
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007613#endif
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007614 } else {
Alexander Duycke0142722017-02-06 18:27:26 -08007615 rx_buffer->pagecnt_bias++;
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007616 }
7617
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007618 return skb;
7619}
7620
Alexander Duyckb1bb2eb2017-02-06 18:27:36 -08007621static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
7622 struct igb_rx_buffer *rx_buffer,
7623 union e1000_adv_rx_desc *rx_desc,
7624 unsigned int size)
7625{
7626 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7627#if (PAGE_SIZE < 8192)
7628 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7629#else
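	/* build_skb() places struct skb_shared_info at the end of the
	 * buffer, so truesize must reserve room for it
	 */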
7630 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
7631 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
7632#endif
7633 struct sk_buff *skb;
7634
7635 /* prefetch first cache line of first page */
7636 prefetch(va);
7637#if L1_CACHE_BYTES < 128
7638 prefetch(va + L1_CACHE_BYTES);
7639#endif
7640
Alexander Duyck3a1eb6d2017-02-15 09:15:59 -08007641 /* build an skb around the page buffer */
Alexander Duyckb1bb2eb2017-02-06 18:27:36 -08007642 skb = build_skb(va - IGB_SKB_PAD, truesize);
7643 if (unlikely(!skb))
7644 return NULL;
7645
7646 /* update pointers within the skb to store the data */
7647 skb_reserve(skb, IGB_SKB_PAD);
7648 __skb_put(skb, size);
7649
7650 /* pull timestamp out of packet data */
7651 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
7652 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
7653 __skb_pull(skb, IGB_TS_HDR_LEN);
7654 }
7655
7656 /* update buffer offset */
7657#if (PAGE_SIZE < 8192)
7658 rx_buffer->page_offset ^= truesize;
7659#else
7660 rx_buffer->page_offset += truesize;
7661#endif
7662
7663 return skb;
7664}
7665
Alexander Duyckcd392f52011-08-26 07:43:59 +00007666static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007667 union e1000_adv_rx_desc *rx_desc,
7668 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08007669{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07007670 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08007671
Alexander Duyck294e7d72011-08-26 07:45:57 +00007672 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007673 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00007674 return;
7675
7676 /* Rx checksum disabled via ethtool */
7677 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08007678 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00007679
Auke Kok9d5c8242008-01-24 02:22:38 -08007680 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007681 if (igb_test_staterr(rx_desc,
7682 E1000_RXDEXT_STATERR_TCPE |
7683 E1000_RXDEXT_STATERR_IPE)) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007684		/* work around errata with SCTP packets where the TCPE (aka
Jesse Brandeburgb9473562009-04-27 22:36:13 +00007685		 * L4E) bit is set incorrectly on 64 byte (60 byte w/o CRC)
 7686		 * packets; let the stack verify the crc32c instead
7687 */
Alexander Duyck866cff02011-08-26 07:45:36 +00007688 if (!((skb->len == 60) &&
7689 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00007690 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00007691 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00007692 u64_stats_update_end(&ring->rx_syncp);
7693 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007694 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08007695 return;
7696 }
7697 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007698 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
7699 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08007700 skb->ip_summed = CHECKSUM_UNNECESSARY;
7701
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007702 dev_dbg(ring->dev, "cksum success: bits %08X\n",
7703 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08007704}
7705
Alexander Duyck077887c2011-08-26 07:46:29 +00007706static inline void igb_rx_hash(struct igb_ring *ring,
7707 union e1000_adv_rx_desc *rx_desc,
7708 struct sk_buff *skb)
7709{
7710 if (ring->netdev->features & NETIF_F_RXHASH)
Tom Herbert42bdf082013-12-18 16:46:58 +00007711 skb_set_hash(skb,
7712 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
7713 PKT_HASH_TYPE_L3);
Alexander Duyck077887c2011-08-26 07:46:29 +00007714}
7715
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007716/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007717 * igb_is_non_eop - process handling of non-EOP buffers
7718 * @rx_ring: Rx ring being processed
7719 * @rx_desc: Rx descriptor for current buffer
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007721 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007722 * This function updates next to clean. If the buffer is an EOP buffer
 7723 * this function exits returning false, otherwise it returns true
 7724 * indicating that this is in fact a non-EOP buffer and that the
 7725 * frame continues in the next descriptor.
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007726 **/
7727static bool igb_is_non_eop(struct igb_ring *rx_ring,
7728 union e1000_adv_rx_desc *rx_desc)
7729{
7730 u32 ntc = rx_ring->next_to_clean + 1;
7731
7732 /* fetch, update, and store next to clean */
7733 ntc = (ntc < rx_ring->count) ? ntc : 0;
7734 rx_ring->next_to_clean = ntc;
7735
7736 prefetch(IGB_RX_DESC(rx_ring, ntc));
7737
7738 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
7739 return false;
7740
7741 return true;
7742}
7743
7744/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007745 * igb_cleanup_headers - Correct corrupted or empty headers
7746 * @rx_ring: rx descriptor ring packet is being transacted on
7747 * @rx_desc: pointer to the EOP Rx descriptor
7748 * @skb: pointer to current skb being fixed
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007749 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007750 * Address the case where we are pulling data in on pages only
7751 * and as such no data is present in the skb header.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007752 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007753 * In addition if skb is not at least 60 bytes we need to pad it so that
7754 * it is large enough to qualify as a valid Ethernet frame.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007755 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007756 * Returns true if an error was encountered and skb was freed.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007757 **/
7758static bool igb_cleanup_headers(struct igb_ring *rx_ring,
7759 union e1000_adv_rx_desc *rx_desc,
7760 struct sk_buff *skb)
7761{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007762 if (unlikely((igb_test_staterr(rx_desc,
7763 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
7764 struct net_device *netdev = rx_ring->netdev;
7765 if (!(netdev->features & NETIF_F_RXALL)) {
7766 dev_kfree_skb_any(skb);
7767 return true;
7768 }
7769 }
7770
Alexander Duycka94d9e22014-12-03 08:17:39 -08007771 /* if eth_skb_pad returns an error the skb was freed */
7772 if (eth_skb_pad(skb))
7773 return true;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007774
7775 return false;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00007776}
7777
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007778/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007779 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
7780 * @rx_ring: rx descriptor ring packet is being transacted on
7781 * @rx_desc: pointer to the EOP Rx descriptor
7782 * @skb: pointer to current skb being populated
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007783 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007784 * This function checks the ring, descriptor, and packet information in
7785 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
7786 * other fields within the skb.
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007787 **/
7788static void igb_process_skb_fields(struct igb_ring *rx_ring,
7789 union e1000_adv_rx_desc *rx_desc,
7790 struct sk_buff *skb)
7791{
7792 struct net_device *dev = rx_ring->netdev;
7793
7794 igb_rx_hash(rx_ring, rx_desc, skb);
7795
7796 igb_rx_checksum(rx_ring, rx_desc, skb);
7797
Jakub Kicinski5499a962014-04-02 10:33:33 +00007798 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
7799 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
7800 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007801
Patrick McHardyf6469682013-04-19 02:04:27 +00007802 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007803 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
7804 u16 vid;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00007805
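		/* packets looped back by the MAC carry the VLAN tag
		 * byte-swapped on some devices, hence the ring flag check
		 */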
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007806 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
7807 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
7808 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
7809 else
7810 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
7811
Patrick McHardy86a9bad2013-04-19 02:04:30 +00007812 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007813 }
7814
7815 skb_record_rx_queue(skb, rx_ring->queue_index);
7816
7817 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
7818}
7819
Alexander Duycke0142722017-02-06 18:27:26 -08007820static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
7821 const unsigned int size)
7822{
7823 struct igb_rx_buffer *rx_buffer;
7824
7825 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
7826 prefetchw(rx_buffer->page);
7827
7828 /* we are reusing so sync this buffer for CPU use */
7829 dma_sync_single_range_for_cpu(rx_ring->dev,
7830 rx_buffer->dma,
7831 rx_buffer->page_offset,
7832 size,
7833 DMA_FROM_DEVICE);
7834
7835 rx_buffer->pagecnt_bias--;
7836
7837 return rx_buffer;
7838}
7839
7840static void igb_put_rx_buffer(struct igb_ring *rx_ring,
7841 struct igb_rx_buffer *rx_buffer)
7842{
7843 if (igb_can_reuse_rx_page(rx_buffer)) {
7844 /* hand second half of page back to the ring */
7845 igb_reuse_rx_page(rx_ring, rx_buffer);
7846 } else {
7847 /* We are not reusing the buffer so unmap it and free
7848 * any references we are holding to it
7849 */
7850 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
7851 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
7852 IGB_RX_DMA_ATTR);
7853 __page_frag_cache_drain(rx_buffer->page,
7854 rx_buffer->pagecnt_bias);
7855 }
7856
7857 /* clear contents of rx_buffer */
7858 rx_buffer->page = NULL;
7859}
7860
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007861static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08007862{
Alexander Duyck0ba82992011-08-26 07:45:47 +00007863 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007864 struct sk_buff *skb = rx_ring->skb;
Auke Kok9d5c8242008-01-24 02:22:38 -08007865 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00007866 u16 cleaned_count = igb_desc_unused(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08007867
Eric W. Biederman57ba34c2014-03-14 18:00:06 -07007868 while (likely(total_packets < budget)) {
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007869 union e1000_adv_rx_desc *rx_desc;
Alexander Duycke0142722017-02-06 18:27:26 -08007870 struct igb_rx_buffer *rx_buffer;
7871 unsigned int size;
Auke Kok9d5c8242008-01-24 02:22:38 -08007872
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007873 /* return some buffers to hardware, one at a time is too slow */
7874 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
7875 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7876 cleaned_count = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07007877 }
7878
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007879 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
Alexander Duycke0142722017-02-06 18:27:26 -08007880 size = le16_to_cpu(rx_desc->wb.upper.length);
7881 if (!size)
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007882 break;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07007883
Alexander Duyck74e238e2013-02-02 05:07:11 +00007884 /* This memory barrier is needed to keep us from reading
7885 * any other fields out of the rx_desc until we know the
Alexander Duyck124b74c2014-12-11 15:02:28 -08007886 * descriptor has been written back
Alexander Duyck74e238e2013-02-02 05:07:11 +00007887 */
Alexander Duyck124b74c2014-12-11 15:02:28 -08007888 dma_rmb();
Alexander Duyck74e238e2013-02-02 05:07:11 +00007889
Alexander Duycke0142722017-02-06 18:27:26 -08007890 rx_buffer = igb_get_rx_buffer(rx_ring, size);
7891
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007892 /* retrieve a buffer from the ring */
Alexander Duycke0142722017-02-06 18:27:26 -08007893 if (skb)
7894 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
Alexander Duyckb1bb2eb2017-02-06 18:27:36 -08007895 else if (ring_uses_build_skb(rx_ring))
7896 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
Alexander Duycke0142722017-02-06 18:27:26 -08007897 else
7898 skb = igb_construct_skb(rx_ring, rx_buffer,
7899 rx_desc, size);
Alexander Duyck16eb8812011-08-26 07:43:54 +00007900
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007901 /* exit if we failed to retrieve a buffer */
Alexander Duycke0142722017-02-06 18:27:26 -08007902 if (!skb) {
7903 rx_ring->rx_stats.alloc_failed++;
7904 rx_buffer->pagecnt_bias++;
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007905 break;
Alexander Duycke0142722017-02-06 18:27:26 -08007906 }
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007907
Alexander Duycke0142722017-02-06 18:27:26 -08007908 igb_put_rx_buffer(rx_ring, rx_buffer);
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007909 cleaned_count++;
7910
7911 /* fetch next buffer in frame if non-eop */
7912 if (igb_is_non_eop(rx_ring, rx_desc))
7913 continue;
Alexander Duyck44390ca2011-08-26 07:43:38 +00007914
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007915 /* verify the packet layout is correct */
7916 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
7917 skb = NULL;
7918 continue;
Auke Kok9d5c8242008-01-24 02:22:38 -08007919 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007920
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007921 /* probably a little skewed due to removing CRC */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007922 total_bytes += skb->len;
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007923
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007924 /* populate checksum, timestamp, VLAN, and protocol */
7925 igb_process_skb_fields(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007926
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00007927 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08007928
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007929 /* reset skb pointer */
7930 skb = NULL;
7931
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007932 /* update budget accounting */
7933 total_packets++;
Eric W. Biederman57ba34c2014-03-14 18:00:06 -07007934 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07007935
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007936 /* place incomplete frames back on ring for completion */
7937 rx_ring->skb = skb;
7938
Eric Dumazet12dcd862010-10-15 17:27:10 +00007939 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08007940 rx_ring->rx_stats.packets += total_packets;
7941 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00007942 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00007943 q_vector->rx.total_packets += total_packets;
7944 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00007945
7946 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00007947 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00007948
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007949 return total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08007950}
7951
Alexander Duycke3cdf682017-02-06 18:27:14 -08007952static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
7953{
7954 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
7955}
7956
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007957static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
7958 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00007959{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007960 struct page *page = bi->page;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007961 dma_addr_t dma;
Alexander Duyckc023cd82011-08-26 07:43:43 +00007962
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007963 /* since we are recycling buffers we should seldom need to alloc */
7964 if (likely(page))
Alexander Duyckc023cd82011-08-26 07:43:43 +00007965 return true;
7966
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007967 /* alloc new page for storage */
Alexander Duyck8649aae2017-02-06 18:27:03 -08007968 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007969 if (unlikely(!page)) {
7970 rx_ring->rx_stats.alloc_failed++;
7971 return false;
Alexander Duyckc023cd82011-08-26 07:43:43 +00007972 }
7973
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007974 /* map page for use */
Alexander Duyck8649aae2017-02-06 18:27:03 -08007975 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
7976 igb_rx_pg_size(rx_ring),
7977 DMA_FROM_DEVICE,
7978 IGB_RX_DMA_ATTR);
Alexander Duyckc023cd82011-08-26 07:43:43 +00007979
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007980 /* if mapping failed free memory back to system since
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007981 * there isn't much point in holding memory we can't use
7982 */
Alexander Duyckc023cd82011-08-26 07:43:43 +00007983 if (dma_mapping_error(rx_ring->dev, dma)) {
Alexander Duyck8649aae2017-02-06 18:27:03 -08007984 __free_pages(page, igb_rx_pg_order(rx_ring));
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007985
Alexander Duyckc023cd82011-08-26 07:43:43 +00007986 rx_ring->rx_stats.alloc_failed++;
7987 return false;
7988 }
7989
7990 bi->dma = dma;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007991 bi->page = page;
Alexander Duycke3cdf682017-02-06 18:27:14 -08007992 bi->page_offset = igb_rx_offset(rx_ring);
Alexander Duyckbd4171a2016-12-14 15:05:34 -08007993 bi->pagecnt_bias = 1;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007994
Alexander Duyckc023cd82011-08-26 07:43:43 +00007995 return true;
7996}
7997
Auke Kok9d5c8242008-01-24 02:22:38 -08007998/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007999 * igb_alloc_rx_buffers - Replace used receive buffers
 8000 * @rx_ring: rx descriptor ring to allocate new receive buffers on
 * @cleaned_count: count of buffers to allocate
Auke Kok9d5c8242008-01-24 02:22:38 -08008001 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00008002void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08008003{
Auke Kok9d5c8242008-01-24 02:22:38 -08008004 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00008005 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00008006 u16 i = rx_ring->next_to_use;
Alexander Duyck8649aae2017-02-06 18:27:03 -08008007 u16 bufsz;
Auke Kok9d5c8242008-01-24 02:22:38 -08008008
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008009 /* nothing to do */
8010 if (!cleaned_count)
8011 return;
8012
Alexander Duyck601369062011-08-26 07:44:05 +00008013 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00008014 bi = &rx_ring->rx_buffer_info[i];
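	/* bias i by the ring size so the wrap check below is a cheap !i test */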
Alexander Duyckc023cd82011-08-26 07:43:43 +00008015 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08008016
Alexander Duyck8649aae2017-02-06 18:27:03 -08008017 bufsz = igb_rx_bufsz(rx_ring);
8018
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008019 do {
Alexander Duyck1a1c2252012-09-25 00:30:52 +00008020 if (!igb_alloc_mapped_page(rx_ring, bi))
Alexander Duyckc023cd82011-08-26 07:43:43 +00008021 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08008022
Alexander Duyck5be59552016-12-14 15:05:30 -08008023 /* sync the buffer for use by the device */
8024 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
Alexander Duyck8649aae2017-02-06 18:27:03 -08008025 bi->page_offset, bufsz,
Alexander Duyck5be59552016-12-14 15:05:30 -08008026 DMA_FROM_DEVICE);
8027
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008028 /* Refresh the desc even if buffer_addrs didn't change
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008029 * because each write-back erases this info.
8030 */
Alexander Duyckf9d40f62013-04-17 20:41:04 +00008031 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
Auke Kok9d5c8242008-01-24 02:22:38 -08008032
Alexander Duyckc023cd82011-08-26 07:43:43 +00008033 rx_desc++;
8034 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08008035 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00008036 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00008037 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00008038 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00008039 i -= rx_ring->count;
8040 }
8041
Alexander Duyck7ec01162017-02-06 18:25:41 -08008042 /* clear the length for the next_to_use descriptor */
8043 rx_desc->wb.upper.length = 0;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008044
8045 cleaned_count--;
8046 } while (cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08008047
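	/* remove the -count bias to recover the true next_to_use index */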
Alexander Duyckc023cd82011-08-26 07:43:43 +00008048 i += rx_ring->count;
8049
Auke Kok9d5c8242008-01-24 02:22:38 -08008050 if (rx_ring->next_to_use != i) {
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008051 /* record the next descriptor to use */
Auke Kok9d5c8242008-01-24 02:22:38 -08008052 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08008053
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008054 /* update next to alloc since we have filled the ring */
8055 rx_ring->next_to_alloc = i;
8056
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008057 /* Force memory writes to complete before letting h/w
Auke Kok9d5c8242008-01-24 02:22:38 -08008058 * know there are new descriptors to fetch. (Only
8059 * applicable for weak-ordered memory model archs,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008060 * such as IA-64).
8061 */
Auke Kok9d5c8242008-01-24 02:22:38 -08008062 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00008063 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08008064 }
8065}
8066
8067/**
 8068 * igb_mii_ioctl - handle MII register access ioctls
 8069 * @netdev: pointer to the netdev the ioctl was issued on
 8070 * @ifr: interface request structure containing the MII data
 8071 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
8072 **/
8073static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8074{
8075 struct igb_adapter *adapter = netdev_priv(netdev);
8076 struct mii_ioctl_data *data = if_mii(ifr);
8077
8078 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8079 return -EOPNOTSUPP;
8080
8081 switch (cmd) {
8082 case SIOCGMIIPHY:
8083 data->phy_id = adapter->hw.phy.addr;
8084 break;
8085 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08008086 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
Carolyn Wyborny9005df32014-04-11 01:45:34 +00008087 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08008088 return -EIO;
8089 break;
8090 case SIOCSMIIREG:
8091 default:
8092 return -EOPNOTSUPP;
8093 }
8094 return 0;
8095}
8096
8097/**
 8098 * igb_ioctl - dispatch device-specific ioctls
 8099 * @netdev: pointer to the netdev the ioctl was issued on
 8100 * @ifr: interface request structure
 8101 * @cmd: ioctl command (MII or hardware timestamp requests)
8102 **/
8103static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8104{
8105 switch (cmd) {
8106 case SIOCGMIIPHY:
8107 case SIOCGMIIREG:
8108 case SIOCSMIIREG:
8109 return igb_mii_ioctl(netdev, ifr, cmd);
Jacob Keller6ab5f7b2014-01-11 07:20:06 +00008110 case SIOCGHWTSTAMP:
8111 return igb_ptp_get_ts_config(netdev, ifr);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00008112 case SIOCSHWTSTAMP:
Jacob Keller6ab5f7b2014-01-11 07:20:06 +00008113 return igb_ptp_set_ts_config(netdev, ifr);
Auke Kok9d5c8242008-01-24 02:22:38 -08008114 default:
8115 return -EOPNOTSUPP;
8116 }
8117}
8118
Todd Fujinaka94826482014-07-10 01:47:15 -07008119void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8120{
8121 struct igb_adapter *adapter = hw->back;
8122
8123 pci_read_config_word(adapter->pdev, reg, value);
8124}
8125
8126void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8127{
8128 struct igb_adapter *adapter = hw->back;
8129
8130 pci_write_config_word(adapter->pdev, reg, *value);
8131}
8132
Alexander Duyck009bc062009-07-23 18:08:35 +00008133s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8134{
8135 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00008136
Jiang Liu23d028c2012-08-20 13:32:20 -06008137 if (pcie_capability_read_word(adapter->pdev, reg, value))
Alexander Duyck009bc062009-07-23 18:08:35 +00008138 return -E1000_ERR_CONFIG;
8139
Alexander Duyck009bc062009-07-23 18:08:35 +00008140 return 0;
8141}
8142
8143s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8144{
8145 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00008146
Jiang Liu23d028c2012-08-20 13:32:20 -06008147 if (pcie_capability_write_word(adapter->pdev, reg, *value))
Alexander Duyck009bc062009-07-23 18:08:35 +00008148 return -E1000_ERR_CONFIG;
8149
Alexander Duyck009bc062009-07-23 18:08:35 +00008150 return 0;
8151}
8152
Michał Mirosławc8f44af2011-11-15 15:29:55 +00008153static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08008154{
8155 struct igb_adapter *adapter = netdev_priv(netdev);
8156 struct e1000_hw *hw = &adapter->hw;
8157 u32 ctrl, rctl;
Patrick McHardyf6469682013-04-19 02:04:27 +00008158 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08008159
Alexander Duyck5faf0302011-08-26 07:46:08 +00008160 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08008161 /* enable VLAN tag insert/strip */
8162 ctrl = rd32(E1000_CTRL);
8163 ctrl |= E1000_CTRL_VME;
8164 wr32(E1000_CTRL, ctrl);
8165
Alexander Duyck51466232009-10-27 23:47:35 +00008166 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08008167 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08008168 rctl &= ~E1000_RCTL_CFIEN;
8169 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08008170 } else {
8171 /* disable VLAN tag insert/strip */
8172 ctrl = rd32(E1000_CTRL);
8173 ctrl &= ~E1000_CTRL_VME;
8174 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08008175 }
8176
Corinna Vinschen030f9f52016-01-28 13:53:23 +01008177 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
Auke Kok9d5c8242008-01-24 02:22:38 -08008178}
8179
Patrick McHardy80d5c362013-04-19 02:04:28 +00008180static int igb_vlan_rx_add_vid(struct net_device *netdev,
8181 __be16 proto, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08008182{
8183 struct igb_adapter *adapter = netdev_priv(netdev);
8184 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008185 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08008186
Alexander Duyck51466232009-10-27 23:47:35 +00008187 /* add the filter since PF can receive vlans w/o entry in vlvf */
Alexander Duyck16903ca2016-01-06 23:11:18 -08008188 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8189 igb_vfta_set(hw, vid, pf_id, true, !!vid);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00008190
8191 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05008192
8193 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08008194}
8195
Patrick McHardy80d5c362013-04-19 02:04:28 +00008196static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8197 __be16 proto, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08008198{
8199 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008200 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08008201 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08008202
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08008203 /* remove VID from filter table */
Alexander Duyck16903ca2016-01-06 23:11:18 -08008204 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8205 igb_vfta_set(hw, vid, pf_id, false, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00008206
8207 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05008208
8209 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08008210}
8211
8212static void igb_restore_vlan(struct igb_adapter *adapter)
8213{
Alexander Duyck5982a552016-01-06 23:10:54 -08008214 u16 vid = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08008215
Alexander Duyck5faf0302011-08-26 07:46:08 +00008216 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
Alexander Duyck5982a552016-01-06 23:10:54 -08008217 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
Alexander Duyck5faf0302011-08-26 07:46:08 +00008218
Alexander Duyck5982a552016-01-06 23:10:54 -08008219 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00008220 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08008221}
8222
David Decotigny14ad2512011-04-27 18:32:43 +00008223int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08008224{
Alexander Duyck090b1792009-10-27 23:51:55 +00008225 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08008226 struct e1000_mac_info *mac = &adapter->hw.mac;
8227
8228 mac->autoneg = 0;
8229
David Decotigny14ad2512011-04-27 18:32:43 +00008230 /* Make sure dplx is at most 1 bit and lsb of speed is not set
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008231 * for the switch() below to work
8232 */
David Decotigny14ad2512011-04-27 18:32:43 +00008233 if ((spd & 1) || (dplx & ~1))
8234 goto err_inval;
8235
Akeem G. Abodunrinf502ef72013-04-05 16:49:06 +00008236	/* Fiber NICs only allow 1000 Mbps Full duplex,
 8237	 * plus 100 Mbps Full duplex for 100BaseFX SFP modules
8238 */
8239 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8240 switch (spd + dplx) {
8241 case SPEED_10 + DUPLEX_HALF:
8242 case SPEED_10 + DUPLEX_FULL:
8243 case SPEED_100 + DUPLEX_HALF:
8244 goto err_inval;
8245 default:
8246 break;
8247 }
8248 }
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00008249
David Decotigny14ad2512011-04-27 18:32:43 +00008250 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08008251 case SPEED_10 + DUPLEX_HALF:
8252 mac->forced_speed_duplex = ADVERTISE_10_HALF;
8253 break;
8254 case SPEED_10 + DUPLEX_FULL:
8255 mac->forced_speed_duplex = ADVERTISE_10_FULL;
8256 break;
8257 case SPEED_100 + DUPLEX_HALF:
8258 mac->forced_speed_duplex = ADVERTISE_100_HALF;
8259 break;
8260 case SPEED_100 + DUPLEX_FULL:
8261 mac->forced_speed_duplex = ADVERTISE_100_FULL;
8262 break;
8263 case SPEED_1000 + DUPLEX_FULL:
8264 mac->autoneg = 1;
8265 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8266 break;
8267 case SPEED_1000 + DUPLEX_HALF: /* not supported */
8268 default:
David Decotigny14ad2512011-04-27 18:32:43 +00008269 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08008270 }
Jesse Brandeburg8376dad2012-07-26 02:31:19 +00008271
8272 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
8273 adapter->hw.phy.mdix = AUTO_ALL_MODES;
8274
Auke Kok9d5c8242008-01-24 02:22:38 -08008275 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00008276
8277err_inval:
8278 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8279 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08008280}
8281
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008282static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8283 bool runtime)
Auke Kok9d5c8242008-01-24 02:22:38 -08008284{
8285 struct net_device *netdev = pci_get_drvdata(pdev);
8286 struct igb_adapter *adapter = netdev_priv(netdev);
8287 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07008288 u32 ctrl, rctl, status;
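	/* runtime suspend wakes only on link change; system suspend
	 * honours the user-configured WoL settings
	 */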
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008289 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
Auke Kok9d5c8242008-01-24 02:22:38 -08008290#ifdef CONFIG_PM
8291 int retval = 0;
8292#endif
8293
Todd Fujinaka94749332016-11-15 08:54:26 -08008294 rtnl_lock();
Auke Kok9d5c8242008-01-24 02:22:38 -08008295 netif_device_detach(netdev);
8296
Alexander Duycka88f10e2008-07-08 15:13:38 -07008297 if (netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008298 __igb_close(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07008299
Jacob Keller8646f7b2016-05-24 13:56:31 -07008300 igb_ptp_suspend(adapter);
8301
Alexander Duyck047e0032009-10-27 15:49:27 +00008302 igb_clear_interrupt_scheme(adapter);
Todd Fujinaka94749332016-11-15 08:54:26 -08008303 rtnl_unlock();
Auke Kok9d5c8242008-01-24 02:22:38 -08008304
8305#ifdef CONFIG_PM
8306 retval = pci_save_state(pdev);
8307 if (retval)
8308 return retval;
8309#endif
8310
8311 status = rd32(E1000_STATUS);
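	/* the link is currently up, so drop wake-on-link-change from the
	 * wake filter; otherwise the link dropping during suspend would
	 * likely wake the system right back up
	 */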
8312 if (status & E1000_STATUS_LU)
8313 wufc &= ~E1000_WUFC_LNKC;
8314
8315 if (wufc) {
8316 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00008317 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08008318
8319 /* turn on all-multi mode if wake on multicast is enabled */
8320 if (wufc & E1000_WUFC_MC) {
8321 rctl = rd32(E1000_RCTL);
8322 rctl |= E1000_RCTL_MPE;
8323 wr32(E1000_RCTL, rctl);
8324 }
8325
8326 ctrl = rd32(E1000_CTRL);
8327 /* advertise wake from D3Cold */
8328 #define E1000_CTRL_ADVD3WUC 0x00100000
8329 /* phy power management enable */
8330 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8331 ctrl |= E1000_CTRL_ADVD3WUC;
8332 wr32(E1000_CTRL, ctrl);
8333
Auke Kok9d5c8242008-01-24 02:22:38 -08008334 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00008335 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08008336
8337 wr32(E1000_WUC, E1000_WUC_PME_EN);
8338 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08008339 } else {
8340 wr32(E1000_WUC, 0);
8341 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08008342 }
8343
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008344 *enable_wake = wufc || adapter->en_mng_pt;
8345 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00008346 igb_power_down_link(adapter);
8347 else
8348 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08008349
8350 /* Release control of h/w to f/w. If f/w is AMT enabled, this
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008351 * would have already happened in close and is redundant.
8352 */
Auke Kok9d5c8242008-01-24 02:22:38 -08008353 igb_release_hw_control(adapter);
8354
8355 pci_disable_device(pdev);
8356
Auke Kok9d5c8242008-01-24 02:22:38 -08008357 return 0;
8358}
8359
Kim Tatt Chuahb90fa872017-03-27 08:44:35 +08008360static void igb_deliver_wake_packet(struct net_device *netdev)
8361{
8362 struct igb_adapter *adapter = netdev_priv(netdev);
8363 struct e1000_hw *hw = &adapter->hw;
8364 struct sk_buff *skb;
8365 u32 wupl;
8366
8367 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8368
8369 /* WUPM stores only the first 128 bytes of the wake packet.
8370 * Read the packet only if we have the whole thing.
8371 */
8372 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8373 return;
8374
8375 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8376 if (!skb)
8377 return;
8378
8379 skb_put(skb, wupl);
8380
8381 /* Ensure reads are 32-bit aligned */
8382 wupl = roundup(wupl, 4);
8383
8384 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8385
8386 skb->protocol = eth_type_trans(skb, netdev);
8387 netif_rx(skb);
8388}
8389
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008390static int __maybe_unused igb_suspend(struct device *dev)
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008391{
8392 int retval;
8393 bool wake;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008394 struct pci_dev *pdev = to_pci_dev(dev);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008395
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008396 retval = __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008397 if (retval)
8398 return retval;
8399
8400 if (wake) {
8401 pci_prepare_to_sleep(pdev);
8402 } else {
8403 pci_wake_from_d3(pdev, false);
8404 pci_set_power_state(pdev, PCI_D3hot);
8405 }
8406
8407 return 0;
8408}
8409
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008410static int __maybe_unused igb_resume(struct device *dev)
Auke Kok9d5c8242008-01-24 02:22:38 -08008411{
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008412 struct pci_dev *pdev = to_pci_dev(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08008413 struct net_device *netdev = pci_get_drvdata(pdev);
8414 struct igb_adapter *adapter = netdev_priv(netdev);
8415 struct e1000_hw *hw = &adapter->hw;
Kim Tatt Chuahb90fa872017-03-27 08:44:35 +08008416 u32 err, val;
Auke Kok9d5c8242008-01-24 02:22:38 -08008417
8418 pci_set_power_state(pdev, PCI_D0);
8419 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00008420 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09008421
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08008422 if (!pci_device_is_present(pdev))
8423 return -ENODEV;
Alexander Duyckaed5dec2009-02-06 23:16:04 +00008424 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08008425 if (err) {
8426 dev_err(&pdev->dev,
8427 "igb: Cannot enable PCI device from suspend\n");
8428 return err;
8429 }
8430 pci_set_master(pdev);
8431
8432 pci_enable_wake(pdev, PCI_D3hot, 0);
8433 pci_enable_wake(pdev, PCI_D3cold, 0);
8434
Stefan Assmann53c7d062012-12-04 06:00:12 +00008435 if (igb_init_interrupt_scheme(adapter, true)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07008436 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8437 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08008438 }
8439
Auke Kok9d5c8242008-01-24 02:22:38 -08008440 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00008441
8442 /* let the f/w know that the h/w is now under the control of the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008443 * driver.
8444 */
Alexander Duycka8564f02009-02-06 23:21:10 +00008445 igb_get_hw_control(adapter);
8446
Kim Tatt Chuahb90fa872017-03-27 08:44:35 +08008447 val = rd32(E1000_WUS);
8448 if (val & WAKE_PKT_WUS)
8449 igb_deliver_wake_packet(netdev);
8450
Auke Kok9d5c8242008-01-24 02:22:38 -08008451 wr32(E1000_WUS, ~0);
8452
Todd Fujinaka94749332016-11-15 08:54:26 -08008453 rtnl_lock();
8454 if (!err && netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008455 err = __igb_open(netdev, true);
Auke Kok9d5c8242008-01-24 02:22:38 -08008456
Todd Fujinaka94749332016-11-15 08:54:26 -08008457 if (!err)
8458 netif_device_attach(netdev);
8459 rtnl_unlock();
8460
8461 return err;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008462}
8463
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008464static int __maybe_unused igb_runtime_idle(struct device *dev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008465{
8466 struct pci_dev *pdev = to_pci_dev(dev);
8467 struct net_device *netdev = pci_get_drvdata(pdev);
8468 struct igb_adapter *adapter = netdev_priv(netdev);
8469
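	/* deny immediate runtime suspend from the idle callback; when
	 * there is no link, schedule a suspend attempt five seconds out
	 */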
8470 if (!igb_has_link(adapter))
8471 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8472
8473 return -EBUSY;
8474}
8475
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008476static int __maybe_unused igb_runtime_suspend(struct device *dev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008477{
8478 struct pci_dev *pdev = to_pci_dev(dev);
8479 int retval;
8480 bool wake;
8481
8482 retval = __igb_shutdown(pdev, &wake, 1);
8483 if (retval)
8484 return retval;
8485
8486 if (wake) {
8487 pci_prepare_to_sleep(pdev);
8488 } else {
8489 pci_wake_from_d3(pdev, false);
8490 pci_set_power_state(pdev, PCI_D3hot);
8491 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008492
Auke Kok9d5c8242008-01-24 02:22:38 -08008493 return 0;
8494}
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008495
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008496static int __maybe_unused igb_runtime_resume(struct device *dev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008497{
8498 return igb_resume(dev);
8499}
Auke Kok9d5c8242008-01-24 02:22:38 -08008500
8501static void igb_shutdown(struct pci_dev *pdev)
8502{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008503 bool wake;
8504
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008505 __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008506
8507 if (system_state == SYSTEM_POWER_OFF) {
8508 pci_wake_from_d3(pdev, wake);
8509 pci_set_power_state(pdev, PCI_D3hot);
8510 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008511}
8512
Greg Rosefa44f2f2013-01-17 01:03:06 -08008513#ifdef CONFIG_PCI_IOV
8514static int igb_sriov_reinit(struct pci_dev *dev)
8515{
8516 struct net_device *netdev = pci_get_drvdata(dev);
8517 struct igb_adapter *adapter = netdev_priv(netdev);
8518 struct pci_dev *pdev = adapter->pdev;
8519
8520 rtnl_lock();
8521
8522 if (netif_running(netdev))
8523 igb_close(netdev);
Stefan Assmann76252722014-07-10 03:29:39 -07008524 else
8525 igb_reset(adapter);
Greg Rosefa44f2f2013-01-17 01:03:06 -08008526
8527 igb_clear_interrupt_scheme(adapter);
8528
8529 igb_init_queue_configuration(adapter);
8530
8531 if (igb_init_interrupt_scheme(adapter, true)) {
Vasily Averinf468adc2015-07-07 18:53:45 +03008532 rtnl_unlock();
Greg Rosefa44f2f2013-01-17 01:03:06 -08008533 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8534 return -ENOMEM;
8535 }
8536
8537 if (netif_running(netdev))
8538 igb_open(netdev);
8539
8540 rtnl_unlock();
8541
8542 return 0;
8543}
8544
8545static int igb_pci_disable_sriov(struct pci_dev *dev)
8546{
8547 int err = igb_disable_sriov(dev);
8548
8549 if (!err)
8550 err = igb_sriov_reinit(dev);
8551
8552 return err;
8553}
8554
8555static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8556{
8557 int err = igb_enable_sriov(dev, num_vfs);
8558
8559 if (err)
8560 goto out;
8561
8562 err = igb_sriov_reinit(dev);
8563 if (!err)
8564 return num_vfs;
8565
8566out:
8567 return err;
8568}
8569
8570#endif
8571static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
8572{
8573#ifdef CONFIG_PCI_IOV
8574 if (num_vfs == 0)
8575 return igb_pci_disable_sriov(dev);
8576 else
8577 return igb_pci_enable_sriov(dev, num_vfs);
8578#endif
8579 return 0;
8580}
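
/* Usage note (illustrative): igb_pci_sriov_configure() is invoked by the
 * PCI core, via the .sriov_configure member of the driver's struct
 * pci_driver, when an administrator writes to sysfs, e.g.
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 * (the device address is an example); writing 0 tears the VFs down again.
 */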
8581
Auke Kok9d5c8242008-01-24 02:22:38 -08008582#ifdef CONFIG_NET_POLL_CONTROLLER
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008583/* Polling 'interrupt' - used by things like netconsole to send skbs
Auke Kok9d5c8242008-01-24 02:22:38 -08008584 * without having to re-enable interrupts. It's not called while
8585 * the interrupt routine is executing.
8586 */
8587static void igb_netpoll(struct net_device *netdev)
8588{
8589 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00008590 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00008591 struct igb_q_vector *q_vector;
Auke Kok9d5c8242008-01-24 02:22:38 -08008592 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08008593
Alexander Duyck047e0032009-10-27 15:49:27 +00008594 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00008595 q_vector = adapter->q_vector[i];
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00008596 if (adapter->flags & IGB_FLAG_HAS_MSIX)
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00008597 wr32(E1000_EIMC, q_vector->eims_value);
8598 else
8599 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00008600 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00008601 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008602}
8603#endif /* CONFIG_NET_POLL_CONTROLLER */
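
/* Context (illustrative): the netpoll path above is exercised by
 * netconsole, e.g. a boot/module parameter of the documented form
 *   netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 * (addresses here are examples), where log messages must be transmitted
 * even while interrupt servicing is unavailable.
 */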
8604
8605/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008606 * igb_io_error_detected - called when PCI error is detected
8607 * @pdev: Pointer to PCI device
8608 * @state: The current pci connection state
Auke Kok9d5c8242008-01-24 02:22:38 -08008609 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008610 * This function is called after a PCI bus error affecting
8611 * this device has been detected.
8612 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08008613static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
8614 pci_channel_state_t state)
8615{
8616 struct net_device *netdev = pci_get_drvdata(pdev);
8617 struct igb_adapter *adapter = netdev_priv(netdev);
8618
8619 netif_device_detach(netdev);
8620
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00008621 if (state == pci_channel_io_perm_failure)
8622 return PCI_ERS_RESULT_DISCONNECT;
8623
Auke Kok9d5c8242008-01-24 02:22:38 -08008624 if (netif_running(netdev))
8625 igb_down(adapter);
8626 pci_disable_device(pdev);
8627
8628 /* Request a slot reset. */
8629 return PCI_ERS_RESULT_NEED_RESET;
8630}
8631
8632/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008633 * igb_io_slot_reset - called after the pci bus has been reset.
8634 * @pdev: Pointer to PCI device
Auke Kok9d5c8242008-01-24 02:22:38 -08008635 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008636 * Restart the card from scratch, as if from a cold-boot. Implementation
8637 * resembles the first-half of the igb_resume routine.
8638 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08008639static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
8640{
8641 struct net_device *netdev = pci_get_drvdata(pdev);
8642 struct igb_adapter *adapter = netdev_priv(netdev);
8643 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08008644 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09008645 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08008646
Alexander Duyckaed5dec2009-02-06 23:16:04 +00008647 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08008648 dev_err(&pdev->dev,
8649 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08008650 result = PCI_ERS_RESULT_DISCONNECT;
8651 } else {
8652 pci_set_master(pdev);
8653 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00008654 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08008655
8656 pci_enable_wake(pdev, PCI_D3hot, 0);
8657 pci_enable_wake(pdev, PCI_D3cold, 0);
8658
Guilherme G Piccoli69b97cf2016-11-10 16:46:43 -02008659 /* In case of a PCI error, the adapter loses its HW address
8660 * so we should re-assign it here.
8661 */
8662 hw->hw_addr = adapter->io_addr;
8663
Alexander Duyck40a914f2008-11-27 00:24:37 -08008664 igb_reset(adapter);
8665 wr32(E1000_WUS, ~0);
8666 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08008667 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008668
Jeff Kirsherea943d42008-12-11 20:34:19 -08008669 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8670 if (err) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008671 dev_err(&pdev->dev,
8672 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8673 err);
Jeff Kirsherea943d42008-12-11 20:34:19 -08008674 /* non-fatal, continue */
8675 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008676
Alexander Duyck40a914f2008-11-27 00:24:37 -08008677 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08008678}
8679
8680/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008681 * igb_io_resume - called when traffic can start flowing again.
8682 * @pdev: Pointer to PCI device
Auke Kok9d5c8242008-01-24 02:22:38 -08008683 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008684 * This callback is called when the error recovery driver tells us that
8685 * its OK to resume normal operation. Implementation resembles the
8686 * second-half of the igb_resume routine.
Auke Kok9d5c8242008-01-24 02:22:38 -08008687 */
8688static void igb_io_resume(struct pci_dev *pdev)
8689{
8690 struct net_device *netdev = pci_get_drvdata(pdev);
8691 struct igb_adapter *adapter = netdev_priv(netdev);
8692
Auke Kok9d5c8242008-01-24 02:22:38 -08008693 if (netif_running(netdev)) {
8694 if (igb_up(adapter)) {
8695 dev_err(&pdev->dev, "igb_up failed after reset\n");
8696 return;
8697 }
8698 }
8699
8700 netif_device_attach(netdev);
8701
8702 /* let the f/w know that the h/w is now under the control of the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008703 * driver.
8704 */
Auke Kok9d5c8242008-01-24 02:22:38 -08008705 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08008706}
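
/* A sketch (not this file's actual definition, which lives with the driver
 * registration) of how the three AER callbacks above are grouped for the
 * PCI core:
 */
#if 0
static const struct pci_error_handlers igb_err_handler_sketch = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};
#endif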
8707
Yury Kylulin83c21332017-03-07 11:20:25 +03008708/**
8709 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
8710 * @adapter: Pointer to adapter structure
8711 * @index: Index of the RAR entry which needs to be synced with MAC table
8712 **/
8713static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
Alexander Duyck26ad9172009-10-05 06:32:49 +00008714{
Alexander Duyck26ad9172009-10-05 06:32:49 +00008715 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc3278582016-01-06 23:10:23 -08008716 u32 rar_low, rar_high;
Yury Kylulin83c21332017-03-07 11:20:25 +03008717 u8 *addr = adapter->mac_table[index].addr;
Alexander Duyck26ad9172009-10-05 06:32:49 +00008718
Alexander Duyck415cd2a2016-03-18 16:06:53 -07008719 /* HW expects these to be in network order when they are plugged
8720 * into the registers, which are little endian. To guarantee that
8721 * ordering we do an leXX_to_cpup here so the value is ready for
8722 * the byteswap that occurs with writel.
Alexander Duyck26ad9172009-10-05 06:32:49 +00008723 */
Alexander Duyck415cd2a2016-03-18 16:06:53 -07008724 rar_low = le32_to_cpup((__le32 *)(addr));
8725 rar_high = le16_to_cpup((__le16 *)(addr + 4));
Alexander Duyck26ad9172009-10-05 06:32:49 +00008726
8727 /* Indicate to hardware the Address is Valid. */
Yury Kylulin83c21332017-03-07 11:20:25 +03008728 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
Corinna Vinschen177132d2017-04-10 10:58:14 +02008729 if (is_valid_ether_addr(addr))
8730 rar_high |= E1000_RAH_AV;
Alexander Duyck26ad9172009-10-05 06:32:49 +00008731
Yury Kylulin83c21332017-03-07 11:20:25 +03008732 if (hw->mac.type == e1000_82575)
8733 rar_high |= E1000_RAH_POOL_1 *
8734 adapter->mac_table[index].queue;
8735 else
8736 rar_high |= E1000_RAH_POOL_1 <<
8737 adapter->mac_table[index].queue;
8738 }
Alexander Duyck26ad9172009-10-05 06:32:49 +00008739
8740 wr32(E1000_RAL(index), rar_low);
8741 wrfl();
8742 wr32(E1000_RAH(index), rar_high);
8743 wrfl();
8744}
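
/* Worked example for the ordering above (example address): with
 * 00:1b:21:aa:bb:cc in addr[0..5], le32_to_cpup(addr) yields 0xaa211b00
 * and le16_to_cpup(addr + 4) yields 0xccbb, so the little-endian writel()
 * byteswap puts addr[0] back in the lowest byte of RAL, as the hardware
 * expects.
 */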
8745
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008746static int igb_set_vf_mac(struct igb_adapter *adapter,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008747 int vf, unsigned char *mac_addr)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008748{
8749 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00008750 /* VF MAC addresses start at the end of the receive addresses and move
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008751 * towards the first; as a result a collision should not be possible
8752 */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00008753 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Yury Kylulin83c21332017-03-07 11:20:25 +03008754 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008755
Yury Kylulin83c21332017-03-07 11:20:25 +03008756 ether_addr_copy(vf_mac_addr, mac_addr);
8757 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
8758 adapter->mac_table[rar_entry].queue = vf;
8759 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
8760 igb_rar_set_index(adapter, rar_entry);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008761
8762 return 0;
8763}
8764
Williams, Mitch A8151d292010-02-10 01:44:24 +00008765static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
8766{
8767 struct igb_adapter *adapter = netdev_priv(netdev);
Corinna Vinschen177132d2017-04-10 10:58:14 +02008768
8769 if (vf >= adapter->vfs_allocated_count)
Williams, Mitch A8151d292010-02-10 01:44:24 +00008770 return -EINVAL;
Corinna Vinschen177132d2017-04-10 10:58:14 +02008771
8772 /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
8773 * flag and allows the MAC to be overwritten via the VF netdev. This
8774 * is necessary to give libvirt a way to restore the original
8775 * MAC after unbinding vfio-pci and reloading igbvf after shutting
8776 * down a VM.
8777 */
8778 if (is_zero_ether_addr(mac)) {
8779 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
8780 dev_info(&adapter->pdev->dev,
8781 "remove administratively set MAC on VF %d\n",
8782 vf);
8783 } else if (is_valid_ether_addr(mac)) {
8784 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
8785 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
8786 mac, vf);
8787 dev_info(&adapter->pdev->dev,
8788 "Reload the VF driver to make this change effective.");
8789 /* Generate additional warning if PF is down */
8790 if (test_bit(__IGB_DOWN, &adapter->state)) {
8791 dev_warn(&adapter->pdev->dev,
8792 "The VF MAC address has been set, but the PF device is not up.\n");
8793 dev_warn(&adapter->pdev->dev,
8794 "Bring the PF device up before attempting to use the VF device.\n");
8795 }
8796 } else {
8797 return -EINVAL;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008798 }
8799 return igb_set_vf_mac(adapter, vf, mac);
8800}
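
/* Administration example (illustrative): this ndo backs iproute2's VF MAC
 * controls on the PF netdev, e.g. with eth0 as an SR-IOV-enabled PF:
 *   ip link set dev eth0 vf 0 mac 52:54:00:12:34:56   (pin a MAC on VF 0)
 *   ip link set dev eth0 vf 0 mac 00:00:00:00:00:00   (clear it again)
 */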
8801
Lior Levy17dc5662011-02-08 02:28:46 +00008802static int igb_link_mbps(int internal_link_speed)
8803{
8804 switch (internal_link_speed) {
8805 case SPEED_100:
8806 return 100;
8807 case SPEED_1000:
8808 return 1000;
8809 default:
8810 return 0;
8811 }
8812}
8813
8814static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
8815 int link_speed)
8816{
8817 int rf_dec, rf_int;
8818 u32 bcnrc_val;
8819
8820 if (tx_rate != 0) {
8821 /* Calculate the rate factor values to set */
8822 rf_int = link_speed / tx_rate;
8823 rf_dec = (link_speed - (rf_int * tx_rate));
Jacob Kellera51d8c22016-04-13 16:08:28 -07008824 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008825 tx_rate;
Lior Levy17dc5662011-02-08 02:28:46 +00008826
8827 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008828 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
8829 E1000_RTTBCNRC_RF_INT_MASK);
Lior Levy17dc5662011-02-08 02:28:46 +00008830 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
8831 } else {
8832 bcnrc_val = 0;
8833 }
8834
8835 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008836 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
Lior Levyf00b0da2011-06-04 06:05:03 +00008837 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
8838 */
8839 wr32(E1000_RTTBCNRM, 0x14);
Lior Levy17dc5662011-02-08 02:28:46 +00008840 wr32(E1000_RTTBCNRC, bcnrc_val);
8841}
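
/* Worked example of the rate-factor math above (assumed numbers, with
 * E1000_RTTBCNRC_RF_INT_SHIFT == 14): for link_speed = 1000 Mbps and
 * tx_rate = 300 Mbps,
 *   rf_int = 1000 / 300 = 3
 *   rf_dec = (1000 - 3 * 300) * 2^14 / 300 = 5461
 * so the hardware is programmed with a rate factor of 3 + 5461/16384,
 * roughly 3.333 = 1000/300, throttling the VF's queue to ~300 Mbps.
 */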
8842
8843static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
8844{
8845 int actual_link_speed, i;
8846 bool reset_rate = false;
8847
8848 /* VF TX rate limit was not set or not supported */
8849 if ((adapter->vf_rate_link_speed == 0) ||
8850 (adapter->hw.mac.type != e1000_82576))
8851 return;
8852
8853 actual_link_speed = igb_link_mbps(adapter->link_speed);
8854 if (actual_link_speed != adapter->vf_rate_link_speed) {
8855 reset_rate = true;
8856 adapter->vf_rate_link_speed = 0;
8857 dev_info(&adapter->pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008858 "Link speed has been changed. VF Transmit rate is disabled\n");
Lior Levy17dc5662011-02-08 02:28:46 +00008859 }
8860
8861 for (i = 0; i < adapter->vfs_allocated_count; i++) {
8862 if (reset_rate)
8863 adapter->vf_data[i].tx_rate = 0;
8864
8865 igb_set_vf_rate_limit(&adapter->hw, i,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008866 adapter->vf_data[i].tx_rate,
8867 actual_link_speed);
Lior Levy17dc5662011-02-08 02:28:46 +00008868 }
8869}
8870
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008871static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
8872 int min_tx_rate, int max_tx_rate)
Williams, Mitch A8151d292010-02-10 01:44:24 +00008873{
Lior Levy17dc5662011-02-08 02:28:46 +00008874 struct igb_adapter *adapter = netdev_priv(netdev);
8875 struct e1000_hw *hw = &adapter->hw;
8876 int actual_link_speed;
8877
8878 if (hw->mac.type != e1000_82576)
8879 return -EOPNOTSUPP;
8880
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008881 if (min_tx_rate)
8882 return -EINVAL;
8883
Lior Levy17dc5662011-02-08 02:28:46 +00008884 actual_link_speed = igb_link_mbps(adapter->link_speed);
8885 if ((vf >= adapter->vfs_allocated_count) ||
8886 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008887 (max_tx_rate < 0) ||
8888 (max_tx_rate > actual_link_speed))
Lior Levy17dc5662011-02-08 02:28:46 +00008889 return -EINVAL;
8890
8891 adapter->vf_rate_link_speed = actual_link_speed;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008892 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
8893 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
Lior Levy17dc5662011-02-08 02:28:46 +00008894
8895 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008896}
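
/* Illustrative usage: this ndo is reached via, e.g.
 *   ip link set dev eth0 vf 0 max_tx_rate 300
 * (rate in Mbps). min_tx_rate is rejected above because the hardware only
 * supports a maximum cap, and only on the 82576.
 */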
8897
Lior Levy70ea4782013-03-03 20:27:48 +00008898static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
8899 bool setting)
8900{
8901 struct igb_adapter *adapter = netdev_priv(netdev);
8902 struct e1000_hw *hw = &adapter->hw;
8903 u32 reg_val, reg_offset;
8904
8905 if (!adapter->vfs_allocated_count)
8906 return -EOPNOTSUPP;
8907
8908 if (vf >= adapter->vfs_allocated_count)
8909 return -EINVAL;
8910
8911 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
8912 reg_val = rd32(reg_offset);
8913 if (setting)
Jacob Kellera51d8c22016-04-13 16:08:28 -07008914 reg_val |= (BIT(vf) |
8915 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
Lior Levy70ea4782013-03-03 20:27:48 +00008916 else
Jacob Kellera51d8c22016-04-13 16:08:28 -07008917 reg_val &= ~(BIT(vf) |
8918 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
Lior Levy70ea4782013-03-03 20:27:48 +00008919 wr32(reg_offset, reg_val);
8920
8921 adapter->vf_data[vf].spoofchk_enabled = setting;
Todd Fujinaka23d87822014-06-04 07:12:15 +00008922 return 0;
Lior Levy70ea4782013-03-03 20:27:48 +00008923}
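
/* Bit layout assumed above (E1000_DTXSWC_VLAN_SPOOF_SHIFT == 8): the low
 * byte of DTXSWC/TXSWC carries the per-VF MAC anti-spoof enables and the
 * next byte the VLAN anti-spoof enables, so vf = 2 toggles
 * BIT(2) | BIT(10) in a single read-modify-write.
 */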
8924
Williams, Mitch A8151d292010-02-10 01:44:24 +00008925static int igb_ndo_get_vf_config(struct net_device *netdev,
8926 int vf, struct ifla_vf_info *ivi)
8927{
8928 struct igb_adapter *adapter = netdev_priv(netdev);

8929 if (vf >= adapter->vfs_allocated_count)
8930 return -EINVAL;
8931 ivi->vf = vf;
8932 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008933 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
8934 ivi->min_tx_rate = 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008935 ivi->vlan = adapter->vf_data[vf].pf_vlan;
8936 ivi->qos = adapter->vf_data[vf].pf_qos;
Lior Levy70ea4782013-03-03 20:27:48 +00008937 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008938 return 0;
8939}
8940
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008941static void igb_vmm_control(struct igb_adapter *adapter)
8942{
8943 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00008944 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008945
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008946 switch (hw->mac.type) {
8947 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00008948 case e1000_i210:
8949 case e1000_i211:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00008950 case e1000_i354:
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008951 default:
8952 /* replication is not supported on these devices */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008953 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008954 case e1000_82576:
8955 /* notify HW that the MAC is adding vlan tags */
8956 reg = rd32(E1000_DTXCTL);
8957 reg |= E1000_DTXCTL_VLAN_ADDED;
8958 wr32(E1000_DTXCTL, reg);
Carolyn Wybornyb26141d2014-04-17 04:10:13 +00008959 /* Fall through */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008960 case e1000_82580:
8961 /* enable replication vlan tag stripping */
8962 reg = rd32(E1000_RPLOLR);
8963 reg |= E1000_RPLOLR_STRVLAN;
8964 wr32(E1000_RPLOLR, reg);
Carolyn Wybornyb26141d2014-04-17 04:10:13 +00008965 /* Fall through */
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00008966 case e1000_i350:
8967 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008968 break;
8969 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00008970
Alexander Duyckd4960302009-10-27 15:53:45 +00008971 if (adapter->vfs_allocated_count) {
8972 igb_vmdq_set_loopback_pf(hw, true);
8973 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00008974 igb_vmdq_set_anti_spoofing_pf(hw, true,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008975 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00008976 } else {
8977 igb_vmdq_set_loopback_pf(hw, false);
8978 igb_vmdq_set_replication_pf(hw, false);
8979 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008980}
8981
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00008982static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
8983{
8984 struct e1000_hw *hw = &adapter->hw;
8985 u32 dmac_thr;
8986 u16 hwm;
8987
8988 if (hw->mac.type > e1000_82580) {
8989 if (adapter->flags & IGB_FLAG_DMAC) {
8990 u32 reg;
8991
8992 /* force threshold to 0. */
8993 wr32(E1000_DMCTXTH, 0);
8994
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008995 /* DMA Coalescing high water mark needs to be greater
Matthew Vicke8c626e2011-11-17 08:33:12 +00008996 * than the Rx threshold. Set hwm to PBA - max frame
8997 * size in 16B units, capping it at PBA - 6KB.
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00008998 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08008999 hwm = 64 * (pba - 6);
Matthew Vicke8c626e2011-11-17 08:33:12 +00009000 reg = rd32(E1000_FCRTC);
9001 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9002 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9003 & E1000_FCRTC_RTH_COAL_MASK);
9004 wr32(E1000_FCRTC, reg);
9005
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009006 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
Matthew Vicke8c626e2011-11-17 08:33:12 +00009007 * frame size, capping it at PBA - 10KB.
9008 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08009009 dmac_thr = pba - 10;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009010 reg = rd32(E1000_DMACR);
9011 reg &= ~E1000_DMACR_DMACTHR_MASK;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009012 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9013 & E1000_DMACR_DMACTHR_MASK);
9014
9015 /* transition to L0x or L1 if available..*/
9016 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9017
9018 /* watchdog timer= +-1000 usec in 32usec intervals */
9019 reg |= (1000 >> 5);
Matthew Vick0c02dd92012-04-14 05:20:32 +00009020
9021 /* Disable BMC-to-OS Watchdog Enable */
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00009022 if (hw->mac.type != e1000_i354)
9023 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9024
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009025 wr32(E1000_DMACR, reg);
9026
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009027 /* no lower threshold to disable
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009028 * coalescing (smart fifo) - UTRESH=0
9029 */
9030 wr32(E1000_DMCRTRH, 0);
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009031
9032 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9033
9034 wr32(E1000_DMCTLX, reg);
9035
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009036 /* free space in tx packet buffer to wake from
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009037 * DMA coalescing
9038 */
9039 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9040 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9041
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009042 /* make low power state decision controlled
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009043 * by DMA coalescing
9044 */
9045 reg = rd32(E1000_PCIEMISC);
9046 reg &= ~E1000_PCIEMISC_LX_DECISION;
9047 wr32(E1000_PCIEMISC, reg);
9048 } /* endif adapter->dmac is not disabled */
9049 } else if (hw->mac.type == e1000_82580) {
9050 u32 reg = rd32(E1000_PCIEMISC);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00009051
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009052 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9053 wr32(E1000_DMACR, 0);
9054 }
9055}
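
/* Units note (worked example, assuming pba is in KB): FCRTC counts in
 * 16-byte blocks and 1KB = 64 such blocks, so hwm = 64 * (pba - 6)
 * programs a high water mark of (pba - 6)KB; e.g. pba = 34 gives
 * 1792 blocks = 28KB. dmac_thr = pba - 10 is likewise "PBA minus 10KB",
 * still expressed in KB.
 */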
9056
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009057/**
9058 * igb_read_i2c_byte - Reads 8 bit word over I2C
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009059 * @hw: pointer to hardware structure
9060 * @byte_offset: byte offset to read
9061 * @dev_addr: device address
9062 * @data: value read
9063 *
9064 * Performs byte read operation over I2C interface at
9065 * a specified device address.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009066 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009067s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009068 u8 dev_addr, u8 *data)
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009069{
9070 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00009071 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009072 s32 status;
9073 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9074
9075 if (!this_client)
9076 return E1000_ERR_I2C;
9077
Todd Fujinaka23d87822014-06-04 07:12:15 +00009080 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009081 return E1000_ERR_SWFW_SYNC;
9082
9083 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9084 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9085
9086 if (status < 0)
9087 return E1000_ERR_I2C;
9088
9089 *data = status;
Todd Fujinaka23d87822014-06-04 07:12:15 +00009090 return 0;
9092}
9093
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009094/**
9095 * igb_write_i2c_byte - Writes 8 bit word over I2C
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009096 * @hw: pointer to hardware structure
9097 * @byte_offset: byte offset to write
9098 * @dev_addr: device address
9099 * @data: value to write
9100 *
9101 * Performs byte write operation over I2C interface at
9102 * a specified device address.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009103 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009104s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009105 u8 dev_addr, u8 data)
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009106{
9107 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00009108 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009109 s32 status;
9110 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9111
9112 if (!this_client)
9113 return E1000_ERR_I2C;
9114
Todd Fujinaka23d87822014-06-04 07:12:15 +00009115 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009116 return E1000_ERR_SWFW_SYNC;
9117 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9118 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9119
9120 if (status)
9121 return E1000_ERR_I2C;
9122
Todd Fujinaka23d87822014-06-04 07:12:15 +00009123 return 0;
9125}
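
/* A minimal caller sketch for the two I2C accessors above; the offset and
 * device address below are placeholders, the real users being the thermal
 * sensor and external PHY code elsewhere in the driver.
 */
#if 0
static s32 igb_i2c_example(struct e1000_hw *hw)
{
	u8 val;
	s32 status;

	status = igb_read_i2c_byte(hw, 0x01, 0xF8, &val);
	if (status)
		return status;

	/* set bit 0 and write the byte back */
	return igb_write_i2c_byte(hw, 0x01, 0xF8, val | 0x01);
}
#endif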
Laura Mihaela Vasilescu907b7832013-10-01 04:33:56 -07009126
9127int igb_reinit_queues(struct igb_adapter *adapter)
9128{
9129 struct net_device *netdev = adapter->netdev;
9130 struct pci_dev *pdev = adapter->pdev;
9131 int err = 0;
9132
9133 if (netif_running(netdev))
9134 igb_close(netdev);
9135
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00009136 igb_reset_interrupt_capability(adapter);
Laura Mihaela Vasilescu907b7832013-10-01 04:33:56 -07009137
9138 if (igb_init_interrupt_scheme(adapter, true)) {
9139 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9140 return -ENOMEM;
9141 }
9142
9143 if (netif_running(netdev))
9144 err = igb_open(netdev);
9145
9146 return err;
9147}
Gangfeng Huang0e71def2016-07-06 13:22:54 +08009148
9149static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9150{
9151 struct igb_nfc_filter *rule;
9152
9153 spin_lock(&adapter->nfc_lock);
9154
9155 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9156 igb_erase_filter(adapter, rule);
9157
9158 spin_unlock(&adapter->nfc_lock);
9159}
9160
9161static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9162{
9163 struct igb_nfc_filter *rule;
9164
9165 spin_lock(&adapter->nfc_lock);
9166
9167 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9168 igb_add_filter(adapter, rule);
9169
9170 spin_unlock(&adapter->nfc_lock);
9171}
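
/* Context (illustrative): the NFC filter list walked above is populated
 * through ethtool's ntuple/NFC interface, e.g.
 *   ethtool -N eth0 flow-type ether proto 0x88f7 action 1
 * to steer an ethertype to queue 1; the exit/restore helpers drop and
 * re-program those hardware filters around resets and teardown.
 */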
Auke Kok9d5c8242008-01-24 02:22:38 -08009172/* igb_main.c */