/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
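/* With the values above, DRV_VERSION expands to the string "5.4.0-k". */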

enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
				"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
				bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif

static int igb_suspend(struct device *);
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
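/* Usage sketch (hypothetical invocation): "modprobe igb max_vfs=7" requests
 * up to seven SR-IOV virtual functions per port at probe time; the effective
 * ceiling is hardware dependent.
 */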
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
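/* A note on semantics, assuming the standard netif_msg_init() helper is used
 * at probe time: a debug value N in 0..16 enables the lowest N message-type
 * bits, while the default of -1 falls back to DEFAULT_MSG_ENABLE (driver,
 * probe and link messages).
 */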

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}
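
/* For reference, a four-register line printed by the format above looks
 * like this (values illustrative only):
 *
 *   RDLEN[0-3]      00000400 00000400 00000400 00000400
 */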

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						page_address(buffer_info->page) +
						buffer_info->page_offset,
						igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}

/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: pointer to hardware structure
 *
 *  Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}

/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: pointer to hardware structure
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: pointer to hardware structure
 *  @state: state to set clock
 *
 *  Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: pointer to hardware structure
 *
 *  Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
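
/* These callbacks bit-bang SDA/SCL through the I2CPARAMS register.  As a
 * rough sketch of how they are hooked up (assuming the usual i2c-algo-bit
 * pattern; the actual wiring lives elsewhere in the driver), the adapter
 * takes a copy of igb_i2c_algo whose .data points back at itself and
 * registers the bus with i2c_bit_add_bus():
 *
 *	adapter->i2c_algo = igb_i2c_algo;
 *	adapter->i2c_algo.data = adapter;
 *	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
 *	err = i2c_bit_add_bus(&adapter->i2c_adap);
 */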

/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
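/* Worked example of the interleave: i = 0, 1, 2, 3, ... yields register
 * indices 0, 8, 1, 9, ..., matching the "VF 0 gets queues 0 and 8" layout
 * described below.
 */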
/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Fall through */
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;
		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
	}

	return value;
}

/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset in IVAR, should be multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing a cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
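
/* Illustrative call (hypothetical values): igb_write_ivar(hw, 3, 2, 16)
 * clears bits 23:16 of the third IVAR0 register and stores vector 3 with
 * E1000_IVAR_VALID set, steering that interrupt cause to MSI-X vector 3.
 */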

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure to initialize
 *
 *  igb_configure_msix sets up the hardware to properly
 *  generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure to initialize
 *
 *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 *  kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}
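
/* With the naming above, a two-queue-pair port named eth0 would appear in
 * /proc/interrupts roughly as "eth0" (other/link), "eth0-TxRx-0" and
 * "eth0-TxRx-1"; the exact set depends on how rings are paired (names
 * illustrative only).
 */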

/**
 *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be freed
 *
 *  This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 *  igb_reset_q_vector - Reset config for interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be reset
 *
 *  If NAPI is enabled it will delete any references to the
 *  NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. So, q_vector is NULL so we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  This function frees the memory allocated to the q_vectors.  In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure to initialize
 *
 *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 *  MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
Auke Kok9d5c8242008-01-24 02:22:38 -08001110
1111/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001112 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1113 * @adapter: board private structure to initialize
1114 * @msix: boolean value of MSIX capability
Auke Kok9d5c8242008-01-24 02:22:38 -08001115 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001116 * Attempt to configure interrupts using the best available
1117 * capabilities of the hardware and kernel.
Auke Kok9d5c8242008-01-24 02:22:38 -08001118 **/
Stefan Assmann53c7d062012-12-04 06:00:12 +00001119static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
Auke Kok9d5c8242008-01-24 02:22:38 -08001120{
1121 int err;
1122 int numvecs, i;
1123
Stefan Assmann53c7d062012-12-04 06:00:12 +00001124 if (!msix)
1125 goto msi_only;
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001126 adapter->flags |= IGB_FLAG_HAS_MSIX;
Stefan Assmann53c7d062012-12-04 06:00:12 +00001127
Alexander Duyck83b71802009-02-06 23:15:45 +00001128 /* Number of supported queues. */
Alexander Duycka99955f2009-11-12 18:37:19 +00001129 adapter->num_rx_queues = adapter->rss_queues;
Greg Rose5fa85172010-07-01 13:38:16 +00001130 if (adapter->vfs_allocated_count)
1131 adapter->num_tx_queues = 1;
1132 else
1133 adapter->num_tx_queues = adapter->rss_queues;
Alexander Duyck83b71802009-02-06 23:15:45 +00001134
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001135 /* start with one vector for every Rx queue */
Alexander Duyck047e0032009-10-27 15:49:27 +00001136 numvecs = adapter->num_rx_queues;
1137
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001138 /* if Tx handler is separate add 1 for every Tx queue */
Alexander Duycka99955f2009-11-12 18:37:19 +00001139 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1140 numvecs += adapter->num_tx_queues;
Alexander Duyck047e0032009-10-27 15:49:27 +00001141
1142 /* store the number of vectors reserved for queues */
1143 adapter->num_q_vectors = numvecs;
1144
1145 /* add 1 vector for link status interrupts */
1146 numvecs++;
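	/* Example (illustrative): with rss_queues = 4 and queue pairing
	 * disabled this requests 4 Rx + 4 Tx + 1 link = 9 vectors; with
	 * IGB_FLAG_QUEUE_PAIRS set, Tx and Rx share vectors and the
	 * request drops to 4 + 1 = 5.
	 */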
Auke Kok9d5c8242008-01-24 02:22:38 -08001147 for (i = 0; i < numvecs; i++)
1148 adapter->msix_entries[i].entry = i;
1149
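	/* Requesting min == max == numvecs makes this all-or-nothing:
	 * pci_enable_msix_range() either reserves every vector we asked
	 * for or fails, in which case we fall back to MSI below.
	 */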
Alexander Gordeev479d02d2014-02-18 11:11:43 +01001150 err = pci_enable_msix_range(adapter->pdev,
1151 adapter->msix_entries,
1152 numvecs,
1153 numvecs);
1154 if (err > 0)
Alexander Duyck0c2cc022012-09-25 00:31:22 +00001155 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08001156
1157 igb_reset_interrupt_capability(adapter);
1158
1159 /* If we can't do MSI-X, try MSI */
1160msi_only:
Christoph Paaschb7093232014-03-21 04:02:09 -07001161 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001162#ifdef CONFIG_PCI_IOV
1163 /* disable SR-IOV for non MSI-X configurations */
1164 if (adapter->vf_data) {
1165 struct e1000_hw *hw = &adapter->hw;
1166 /* disable iov and allow time for transactions to clear */
1167 pci_disable_sriov(adapter->pdev);
1168 msleep(500);
1169
Yury Kylulin4827cc32017-03-07 11:20:26 +03001170 kfree(adapter->vf_mac_list);
1171 adapter->vf_mac_list = NULL;
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001172 kfree(adapter->vf_data);
1173 adapter->vf_data = NULL;
1174 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00001175 wrfl();
Alexander Duyck2a3abf62009-04-07 14:37:52 +00001176 msleep(100);
1177 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1178 }
1179#endif
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001180 adapter->vfs_allocated_count = 0;
Alexander Duycka99955f2009-11-12 18:37:19 +00001181 adapter->rss_queues = 1;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001182 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
Auke Kok9d5c8242008-01-24 02:22:38 -08001183 adapter->num_rx_queues = 1;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07001184 adapter->num_tx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001185 adapter->num_q_vectors = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001186 if (!pci_enable_msi(adapter->pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001187 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001188}
1189
Alexander Duyck5536d212012-09-25 00:31:17 +00001190static void igb_add_ring(struct igb_ring *ring,
1191 struct igb_ring_container *head)
1192{
1193 head->ring = ring;
1194 head->count++;
1195}
1196
1197/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001198 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1199 * @adapter: board private structure to initialize
1200 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1201 * @v_idx: index of vector in adapter struct
1202 * @txr_count: total number of Tx rings to allocate
1203 * @txr_idx: index of first Tx ring to allocate
1204 * @rxr_count: total number of Rx rings to allocate
1205 * @rxr_idx: index of first Rx ring to allocate
Alexander Duyck5536d212012-09-25 00:31:17 +00001206 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001207 * We allocate one q_vector. If allocation fails we return -ENOMEM.
Alexander Duyck5536d212012-09-25 00:31:17 +00001208 **/
1209static int igb_alloc_q_vector(struct igb_adapter *adapter,
1210 int v_count, int v_idx,
1211 int txr_count, int txr_idx,
1212 int rxr_count, int rxr_idx)
1213{
1214 struct igb_q_vector *q_vector;
1215 struct igb_ring *ring;
1216 int ring_count, size;
1217
1218 /* igb only supports 1 Tx and/or 1 Rx queue per vector */
1219 if (txr_count > 1 || rxr_count > 1)
1220 return -ENOMEM;
1221
1222 ring_count = txr_count + rxr_count;
1223 size = sizeof(struct igb_q_vector) +
1224 (sizeof(struct igb_ring) * ring_count);
1225
1226 /* allocate q_vector and rings */
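	/* Reuse the previous allocation if it is still large enough
	 * (ksize() reports the usable size of the slab object); otherwise
	 * free the old q_vector via RCU and allocate a fresh, zeroed one.
	 */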
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001227 q_vector = adapter->q_vector[v_idx];
Shota Suzuki72ddef02015-07-01 09:25:52 +09001228 if (!q_vector) {
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00001229 q_vector = kzalloc(size, GFP_KERNEL);
Shota Suzuki72ddef02015-07-01 09:25:52 +09001230 } else if (size > ksize(q_vector)) {
1231 kfree_rcu(q_vector, rcu);
1232 q_vector = kzalloc(size, GFP_KERNEL);
1233 } else {
Toshiaki Makitac0a06ee2015-04-13 18:15:10 +09001234 memset(q_vector, 0, size);
Shota Suzuki72ddef02015-07-01 09:25:52 +09001235 }
Alexander Duyck5536d212012-09-25 00:31:17 +00001236 if (!q_vector)
1237 return -ENOMEM;
1238
1239 /* initialize NAPI */
1240 netif_napi_add(adapter->netdev, &q_vector->napi,
1241 igb_poll, 64);
1242
1243 /* tie q_vector and adapter together */
1244 adapter->q_vector[v_idx] = q_vector;
1245 q_vector->adapter = adapter;
1246
1247 /* initialize work limits */
1248 q_vector->tx.work_limit = adapter->tx_work_limit;
1249
1250 /* initialize ITR configuration */
Jarod Wilson7b06a692015-10-19 11:52:04 -04001251 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
Alexander Duyck5536d212012-09-25 00:31:17 +00001252 q_vector->itr_val = IGB_START_ITR;
1253
1254 /* initialize pointer to rings */
1255 ring = q_vector->ring;
1256
Alexander Duyck4e2276672013-02-12 02:31:01 +00001257	/* initialize ITR */
1258 if (rxr_count) {
1259 /* rx or rx/tx vector */
1260 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1261 q_vector->itr_val = adapter->rx_itr_setting;
1262 } else {
1263 /* tx only vector */
1264 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1265 q_vector->itr_val = adapter->tx_itr_setting;
1266 }
1267
Alexander Duyck5536d212012-09-25 00:31:17 +00001268 if (txr_count) {
1269 /* assign generic ring traits */
1270 ring->dev = &adapter->pdev->dev;
1271 ring->netdev = adapter->netdev;
1272
1273 /* configure backlink on ring */
1274 ring->q_vector = q_vector;
1275
1276 /* update q_vector Tx values */
1277 igb_add_ring(ring, &q_vector->tx);
1278
1279 /* For 82575, context index must be unique per ring. */
1280 if (adapter->hw.mac.type == e1000_82575)
1281 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1282
1283 /* apply Tx specific ring traits */
1284 ring->count = adapter->tx_ring_count;
1285 ring->queue_index = txr_idx;
1286
Andre Guedes05f9d3e2017-10-16 18:01:28 -07001287 ring->cbs_enable = false;
1288 ring->idleslope = 0;
1289 ring->sendslope = 0;
1290 ring->hicredit = 0;
1291 ring->locredit = 0;
1292
John Stultz827da442013-10-07 15:51:58 -07001293 u64_stats_init(&ring->tx_syncp);
1294 u64_stats_init(&ring->tx_syncp2);
1295
Alexander Duyck5536d212012-09-25 00:31:17 +00001296 /* assign ring to adapter */
1297 adapter->tx_ring[txr_idx] = ring;
1298
1299 /* push pointer to next ring */
1300 ring++;
1301 }
1302
1303 if (rxr_count) {
1304 /* assign generic ring traits */
1305 ring->dev = &adapter->pdev->dev;
1306 ring->netdev = adapter->netdev;
1307
1308 /* configure backlink on ring */
1309 ring->q_vector = q_vector;
1310
1311 /* update q_vector Rx values */
1312 igb_add_ring(ring, &q_vector->rx);
1313
1314 /* set flag indicating ring supports SCTP checksum offload */
1315 if (adapter->hw.mac.type >= e1000_82576)
1316 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1317
Carolyn Wybornye52c0f92014-04-11 01:46:06 +00001318 /* On i350, i354, i210, and i211, loopback VLAN packets
Alexander Duyck5536d212012-09-25 00:31:17 +00001319 * have the tag byte-swapped.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001320 */
Alexander Duyck5536d212012-09-25 00:31:17 +00001321 if (adapter->hw.mac.type >= e1000_i350)
1322 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1323
1324 /* apply Rx specific ring traits */
1325 ring->count = adapter->rx_ring_count;
1326 ring->queue_index = rxr_idx;
1327
John Stultz827da442013-10-07 15:51:58 -07001328 u64_stats_init(&ring->rx_syncp);
1329
Alexander Duyck5536d212012-09-25 00:31:17 +00001330 /* assign ring to adapter */
1331 adapter->rx_ring[rxr_idx] = ring;
1332 }
1333
1334 return 0;
1335}
1336
Auke Kok9d5c8242008-01-24 02:22:38 -08001338/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001339 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1340 * @adapter: board private structure to initialize
Alexander Duyck047e0032009-10-27 15:49:27 +00001341 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001342 * We allocate one q_vector per queue interrupt. If allocation fails we
1343 * return -ENOMEM.
Alexander Duyck047e0032009-10-27 15:49:27 +00001344 **/
1345static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1346{
Alexander Duyck5536d212012-09-25 00:31:17 +00001347 int q_vectors = adapter->num_q_vectors;
1348 int rxr_remaining = adapter->num_rx_queues;
1349 int txr_remaining = adapter->num_tx_queues;
1350 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1351 int err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001352
Alexander Duyck5536d212012-09-25 00:31:17 +00001353 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1354 for (; rxr_remaining; v_idx++) {
1355 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1356 0, 0, 1, rxr_idx);
1357
1358 if (err)
1359 goto err_out;
1360
1361 /* update counts and index */
1362 rxr_remaining--;
1363 rxr_idx++;
1364 }
1365 }
1366
1367 for (; v_idx < q_vectors; v_idx++) {
1368 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1369 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001370
Alexander Duyck5536d212012-09-25 00:31:17 +00001371 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1372 tqpv, txr_idx, rqpv, rxr_idx);
1373
1374 if (err)
Alexander Duyck047e0032009-10-27 15:49:27 +00001375 goto err_out;
Alexander Duyck5536d212012-09-25 00:31:17 +00001376
1377 /* update counts and index */
1378 rxr_remaining -= rqpv;
1379 txr_remaining -= tqpv;
1380 rxr_idx++;
1381 txr_idx++;
Alexander Duyck047e0032009-10-27 15:49:27 +00001382 }
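	/* Example (illustrative): with IGB_FLAG_QUEUE_PAIRS and 4 Tx plus
	 * 4 Rx queues on 4 q_vectors, the loop above assigns one Tx and
	 * one Rx ring to every vector (rqpv = tqpv = 1 each iteration).
	 */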
Alexander Duyck81c2fc22011-08-26 07:45:20 +00001383
Alexander Duyck047e0032009-10-27 15:49:27 +00001384 return 0;
1385
1386err_out:
Alexander Duyck5536d212012-09-25 00:31:17 +00001387 adapter->num_tx_queues = 0;
1388 adapter->num_rx_queues = 0;
1389 adapter->num_q_vectors = 0;
1390
1391 while (v_idx--)
1392 igb_free_q_vector(adapter, v_idx);
1393
Alexander Duyck047e0032009-10-27 15:49:27 +00001394 return -ENOMEM;
1395}
1396
Alexander Duyck047e0032009-10-27 15:49:27 +00001397/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001398 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1399 * @adapter: board private structure to initialize
1400 * @msix: boolean value of MSIX capability
Alexander Duyck047e0032009-10-27 15:49:27 +00001401 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001402 * This function initializes the interrupts and allocates all of the queues.
Alexander Duyck047e0032009-10-27 15:49:27 +00001403 **/
Stefan Assmann53c7d062012-12-04 06:00:12 +00001404static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
Alexander Duyck047e0032009-10-27 15:49:27 +00001405{
1406 struct pci_dev *pdev = adapter->pdev;
1407 int err;
1408
Stefan Assmann53c7d062012-12-04 06:00:12 +00001409 igb_set_interrupt_capability(adapter, msix);
Alexander Duyck047e0032009-10-27 15:49:27 +00001410
1411 err = igb_alloc_q_vectors(adapter);
1412 if (err) {
1413 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1414 goto err_alloc_q_vectors;
1415 }
1416
Alexander Duyck5536d212012-09-25 00:31:17 +00001417 igb_cache_ring_register(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001418
1419 return 0;
Alexander Duyck5536d212012-09-25 00:31:17 +00001420
Alexander Duyck047e0032009-10-27 15:49:27 +00001421err_alloc_q_vectors:
1422 igb_reset_interrupt_capability(adapter);
1423 return err;
1424}
1425
1426/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001427 * igb_request_irq - initialize interrupts
1428 * @adapter: board private structure to initialize
Auke Kok9d5c8242008-01-24 02:22:38 -08001429 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001430 * Attempts to configure interrupts using the best available
1431 * capabilities of the hardware and kernel.
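 * The fallback order is MSI-X first, then MSI, then legacy INTx.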
Auke Kok9d5c8242008-01-24 02:22:38 -08001432 **/
1433static int igb_request_irq(struct igb_adapter *adapter)
1434{
1435 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001436 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001437 int err = 0;
1438
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001439 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001440 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001441 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001442 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001443 /* fall back to MSI */
Alexander Duyck5536d212012-09-25 00:31:17 +00001444 igb_free_all_tx_resources(adapter);
1445 igb_free_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001446
Alexander Duyck047e0032009-10-27 15:49:27 +00001447 igb_clear_interrupt_scheme(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001448 err = igb_init_interrupt_scheme(adapter, false);
1449 if (err)
Alexander Duyck047e0032009-10-27 15:49:27 +00001450 goto request_done;
Stefan Assmann53c7d062012-12-04 06:00:12 +00001451
Alexander Duyck047e0032009-10-27 15:49:27 +00001452 igb_setup_all_tx_resources(adapter);
1453 igb_setup_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001454 igb_configure(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001455 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001456
Alexander Duyckc74d5882011-08-26 07:46:45 +00001457 igb_assign_vector(adapter->q_vector[0], 0);
1458
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001459 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001460 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001461 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001462 if (!err)
1463 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001464
Auke Kok9d5c8242008-01-24 02:22:38 -08001465 /* fall back to legacy interrupts */
1466 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001467 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001468 }
1469
Alexander Duyckc74d5882011-08-26 07:46:45 +00001470 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001471 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001472
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001473 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001474 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001475 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001476
1477request_done:
1478 return err;
1479}
1480
1481static void igb_free_irq(struct igb_adapter *adapter)
1482{
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001483 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001484 int vector = 0, i;
1485
Alexander Duyck047e0032009-10-27 15:49:27 +00001486 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001487
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001488 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001489 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001490 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001491 } else {
1492 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001493 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001494}
1495
1496/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001497 * igb_irq_disable - Mask off interrupt generation on the NIC
1498 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001499 **/
1500static void igb_irq_disable(struct igb_adapter *adapter)
1501{
1502 struct e1000_hw *hw = &adapter->hw;
1503
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001504 /* we need to be careful when disabling interrupts. The VFs are also
Alexander Duyck25568a52009-10-27 23:49:59 +00001505	 * mapped into these registers, so clearing the bits can cause
1506	 * issues on the VF drivers; we only clear the bits we set
1507 */
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001508 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001509 u32 regval = rd32(E1000_EIAM);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001510
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001511 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1512 wr32(E1000_EIMC, adapter->eims_enable_mask);
1513 regval = rd32(E1000_EIAC);
1514 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001515 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001516
1517 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001518 wr32(E1000_IMC, ~0);
1519 wrfl();
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001520 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Emil Tantilov81a61852010-08-02 14:40:52 +00001521 int i;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001522
Emil Tantilov81a61852010-08-02 14:40:52 +00001523 for (i = 0; i < adapter->num_q_vectors; i++)
1524 synchronize_irq(adapter->msix_entries[i].vector);
1525 } else {
1526 synchronize_irq(adapter->pdev->irq);
1527 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001528}
1529
1530/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001531 * igb_irq_enable - Enable default interrupt generation settings
1532 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001533 **/
1534static void igb_irq_enable(struct igb_adapter *adapter)
1535{
1536 struct e1000_hw *hw = &adapter->hw;
1537
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00001538 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001539 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001540 u32 regval = rd32(E1000_EIAC);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00001541
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001542 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1543 regval = rd32(E1000_EIAM);
1544 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001545 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001546 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001547 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001548 ims |= E1000_IMS_VMMB;
1549 }
1550 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001551 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001552 wr32(E1000_IMS, IMS_ENABLE_MASK |
1553 E1000_IMS_DRSTA);
1554 wr32(E1000_IAM, IMS_ENABLE_MASK |
1555 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001556 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001557}
1558
1559static void igb_update_mng_vlan(struct igb_adapter *adapter)
1560{
Alexander Duyck51466232009-10-27 23:47:35 +00001561 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08001562 u16 pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08001563 u16 vid = adapter->hw.mng_cookie.vlan_id;
1564 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001565
Alexander Duyck51466232009-10-27 23:47:35 +00001566 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1567 /* add VID to filter table */
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08001568 igb_vfta_set(hw, vid, pf_id, true, true);
Alexander Duyck51466232009-10-27 23:47:35 +00001569 adapter->mng_vlan_id = vid;
1570 } else {
1571 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1572 }
1573
1574 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1575 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001576 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001577 /* remove VID from filter table */
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08001578 igb_vfta_set(hw, vid, pf_id, false, true);
Auke Kok9d5c8242008-01-24 02:22:38 -08001579 }
1580}
1581
1582/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001583 * igb_release_hw_control - release control of the h/w to f/w
1584 * @adapter: address of board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001585 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001586 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1587 * For ASF and Pass Through versions of f/w this means that the
1588 * driver is no longer loaded.
Auke Kok9d5c8242008-01-24 02:22:38 -08001589 **/
1590static void igb_release_hw_control(struct igb_adapter *adapter)
1591{
1592 struct e1000_hw *hw = &adapter->hw;
1593 u32 ctrl_ext;
1594
1595 /* Let firmware take over control of h/w */
1596 ctrl_ext = rd32(E1000_CTRL_EXT);
1597 wr32(E1000_CTRL_EXT,
1598 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1599}
1600
Auke Kok9d5c8242008-01-24 02:22:38 -08001601/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001602 * igb_get_hw_control - get control of the h/w from f/w
1603 * @adapter: address of board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001604 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001605 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1606 * For ASF and Pass Through versions of f/w this means that
1607 * the driver is loaded.
Auke Kok9d5c8242008-01-24 02:22:38 -08001608 **/
1609static void igb_get_hw_control(struct igb_adapter *adapter)
1610{
1611 struct e1000_hw *hw = &adapter->hw;
1612 u32 ctrl_ext;
1613
1614 /* Let firmware know the driver has taken over */
1615 ctrl_ext = rd32(E1000_CTRL_EXT);
1616 wr32(E1000_CTRL_EXT,
1617 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1618}
1619
Andre Guedes05f9d3e2017-10-16 18:01:28 -07001620static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1621{
1622 struct net_device *netdev = adapter->netdev;
1623 struct e1000_hw *hw = &adapter->hw;
1624
1625 WARN_ON(hw->mac.type != e1000_i210);
1626
1627 if (enable)
1628 adapter->flags |= IGB_FLAG_FQTSS;
1629 else
1630 adapter->flags &= ~IGB_FLAG_FQTSS;
1631
1632 if (netif_running(netdev))
1633 schedule_work(&adapter->reset_task);
1634}
1635
1636static bool is_fqtss_enabled(struct igb_adapter *adapter)
1637{
1638 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1639}
1640
1641static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1642 enum tx_queue_prio prio)
1643{
1644 u32 val;
1645
1646 WARN_ON(hw->mac.type != e1000_i210);
1647	WARN_ON(queue < 0 || queue > 3);
1648
1649 val = rd32(E1000_I210_TXDCTL(queue));
1650
1651 if (prio == TX_QUEUE_PRIO_HIGH)
1652 val |= E1000_TXDCTL_PRIORITY;
1653 else
1654 val &= ~E1000_TXDCTL_PRIORITY;
1655
1656 wr32(E1000_I210_TXDCTL(queue), val);
1657}
1658
1659static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1660{
1661 u32 val;
1662
1663 WARN_ON(hw->mac.type != e1000_i210);
1664 WARN_ON(queue < 0 || queue > 1);
1665
1666 val = rd32(E1000_I210_TQAVCC(queue));
1667
1668 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1669 val |= E1000_TQAVCC_QUEUEMODE;
1670 else
1671 val &= ~E1000_TQAVCC_QUEUEMODE;
1672
1673 wr32(E1000_I210_TQAVCC(queue), val);
1674}
1675
1676/**
1677 * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
1678 * @adapter: pointer to adapter struct
1679 * @queue: queue number
1680 * @enable: true = enable CBS, false = disable CBS
1681 * @idleslope: idleSlope in kbps
1682 * @sendslope: sendSlope in kbps
1683 * @hicredit: hiCredit in bytes
1684 * @locredit: loCredit in bytes
1685 *
1686 * Configure CBS for a given hardware queue. When disabling, the idleslope,
1687 * sendslope, hicredit and locredit arguments are ignored.
1689 **/
1690static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
1691 bool enable, int idleslope, int sendslope,
1692 int hicredit, int locredit)
1693{
1694 struct net_device *netdev = adapter->netdev;
1695 struct e1000_hw *hw = &adapter->hw;
1696 u32 tqavcc;
1697 u16 value;
1698
1699 WARN_ON(hw->mac.type != e1000_i210);
1700 WARN_ON(queue < 0 || queue > 1);
1701
1702 if (enable) {
1703 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1704 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1705
1706 /* According to i210 datasheet section 7.2.7.7, we should set
1707 * the 'idleSlope' field from TQAVCC register following the
1708 * equation:
1709 *
1710 * For 100 Mbps link speed:
1711 *
1712 * value = BW * 0x7735 * 0.2 (E1)
1713 *
1714 * For 1000Mbps link speed:
1715 *
1716 * value = BW * 0x7735 * 2 (E2)
1717 *
1718 * E1 and E2 can be merged into one equation as shown below.
1719 * Note that 'link-speed' is in Mbps.
1720 *
1721 * value = BW * 0x7735 * 2 * link-speed
1722 * -------------- (E3)
1723 * 1000
1724 *
1725 * 'BW' is the percentage bandwidth out of full link speed
1726 * which can be found with the following equation. Note that
1727 * idleSlope here is the parameter from this function which
1728 * is in kbps.
1729 *
1730 * BW = idleSlope
1731 * ----------------- (E4)
1732 * link-speed * 1000
1733 *
1734 * That said, we can come up with a generic equation to
1735	 * calculate the value we should write to the TQAVCC register by
1736 * replacing 'BW' in E3 by E4. The resulting equation is:
1737 *
1738 * value = idleSlope * 0x7735 * 2 * link-speed
1739 * ----------------- -------------- (E5)
1740 * link-speed * 1000 1000
1741 *
1742 * 'link-speed' is present in both sides of the fraction so
1743 * it is canceled out. The final equation is the following:
1744 *
1745 * value = idleSlope * 61034
1746 * ----------------- (E6)
1747 * 1000000
Jesus Sanchez-Palencia0da60902017-11-10 14:21:50 -08001748 *
1749 * NOTE: For i210, given the above, we can see that idleslope
1750 * is represented in 16.38431 kbps units by the value at
1751 * the TQAVCC register (1Gbps / 61034), which reduces
1752 * the granularity for idleslope increments.
1753 * For instance, if you want to configure a 2576kbps
1754 * idleslope, the value to be written on the register
1755 * would have to be 157.23. If rounded down, you end
1756 * up with less bandwidth available than originally
1757 * required (~2572 kbps). If rounded up, you end up
1758 * with a higher bandwidth (~2589 kbps). Below the
1759 * approach we take is to always round up the
1760 * calculated value, so the resulting bandwidth might
1761 * be slightly higher for some configurations.
Andre Guedes05f9d3e2017-10-16 18:01:28 -07001762 */
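		/* Worked example (illustrative): for idleslope = 2576 kbps,
		 * DIV_ROUND_UP_ULL(2576 * 61034, 1000000) = 158, which
		 * reserves roughly 158 * 16.38431 ~= 2588.7 kbps.
		 */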
1763 value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
1764
1765 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1766 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1767 tqavcc |= value;
1768 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1769
1770 wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
1771 } else {
1772 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1773 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1774
1775 /* Set idleSlope to zero. */
1776 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1777 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1778 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1779
1780 /* Set hiCredit to zero. */
1781 wr32(E1000_I210_TQAVHC(queue), 0);
1782 }
1783
1784	/* XXX: On the i210 controller, the sendSlope and loCredit parameters
1785	 * from CBS are not configurable by software, so we don't do any
1786	 * 'controller configuration' with respect to these parameters.
1787 */
1788
1789 netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1790 (enable) ? "enabled" : "disabled", queue,
1791 idleslope, sendslope, hicredit, locredit);
1792}
1793
1794static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1795 bool enable, int idleslope, int sendslope,
1796 int hicredit, int locredit)
1797{
1798 struct igb_ring *ring;
1799
1800	if (queue < 0 || queue >= adapter->num_tx_queues)
1801 return -EINVAL;
1802
1803 ring = adapter->tx_ring[queue];
1804
1805 ring->cbs_enable = enable;
1806 ring->idleslope = idleslope;
1807 ring->sendslope = sendslope;
1808 ring->hicredit = hicredit;
1809 ring->locredit = locredit;
1810
1811 return 0;
1812}
1813
1814static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1815{
1816 struct igb_ring *ring;
1817 int i;
1818
1819 for (i = 0; i < adapter->num_tx_queues; i++) {
1820 ring = adapter->tx_ring[i];
1821
1822 if (ring->cbs_enable)
1823 return true;
1824 }
1825
1826 return false;
1827}
1828
1829static void igb_setup_tx_mode(struct igb_adapter *adapter)
1830{
1831 struct net_device *netdev = adapter->netdev;
1832 struct e1000_hw *hw = &adapter->hw;
1833 u32 val;
1834
1835 /* Only i210 controller supports changing the transmission mode. */
1836 if (hw->mac.type != e1000_i210)
1837 return;
1838
1839 if (is_fqtss_enabled(adapter)) {
1840 int i, max_queue;
1841
1842 /* Configure TQAVCTRL register: set transmit mode to 'Qav',
1843 * set data fetch arbitration to 'round robin' and set data
1844		 * transfer arbitration to 'credit shaper algorithm'.
1845 */
1846 val = rd32(E1000_I210_TQAVCTRL);
1847 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
1848 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1849 wr32(E1000_I210_TQAVCTRL, val);
1850
1851		/* Configure Tx and Rx packet buffer sizes as described in
1852 * i210 datasheet section 7.2.7.7.
1853 */
1854 val = rd32(E1000_TXPBS);
1855 val &= ~I210_TXPBSIZE_MASK;
1856 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1857 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1858 wr32(E1000_TXPBS, val);
1859
1860 val = rd32(E1000_RXPBS);
1861 val &= ~I210_RXPBSIZE_MASK;
1862 val |= I210_RXPBSIZE_PB_32KB;
1863 wr32(E1000_RXPBS, val);
1864
1865 /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
1866 * register should not exceed the buffer size programmed in
1867 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
1868 * so according to the datasheet we should set MAX_TPKT_SIZE to
1869 * 4kB / 64.
1870 *
1871		 * However, when we do so, no frames from queues 2 and 3 are
1872		 * transmitted. It seems MAX_TPKT_SIZE must not be greater than
1873		 * or _equal_ to the buffer size programmed in TXPBS. For this
1874		 * reason, we set MAX_TPKT_SIZE to (4 kB - 1) / 64.
1875 */
1876 val = (4096 - 1) / 64;
1877 wr32(E1000_I210_DTXMXPKTSZ, val);
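		/* (4096 - 1) / 64 = 63, i.e. fetches are capped at
		 * 63 * 64 = 4032 bytes, strictly below the 4 kB buffers
		 * programmed in TXPBS above.
		 */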
1878
1879 /* Since FQTSS mode is enabled, apply any CBS configuration
1880 * previously set. If no previous CBS configuration has been
1881 * done, then the initial configuration is applied, which means
1882 * CBS is disabled.
1883 */
1884 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1885 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1886
1887 for (i = 0; i < max_queue; i++) {
1888 struct igb_ring *ring = adapter->tx_ring[i];
1889
1890 igb_configure_cbs(adapter, i, ring->cbs_enable,
1891 ring->idleslope, ring->sendslope,
1892 ring->hicredit, ring->locredit);
1893 }
1894 } else {
1895 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1896 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1897 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1898
1899 val = rd32(E1000_I210_TQAVCTRL);
1900 /* According to Section 8.12.21, the other flags we've set when
1901		 * enabling FQTSS are not relevant when disabling FQTSS, so we
1902		 * don't touch them here.
1903 */
1904 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1905 wr32(E1000_I210_TQAVCTRL, val);
1906 }
1907
1908 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1909 "enabled" : "disabled");
1910}
1911
Auke Kok9d5c8242008-01-24 02:22:38 -08001912/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001913 * igb_configure - configure the hardware for RX and TX
1914 * @adapter: private board structure
Auke Kok9d5c8242008-01-24 02:22:38 -08001915 **/
1916static void igb_configure(struct igb_adapter *adapter)
1917{
1918 struct net_device *netdev = adapter->netdev;
1919 int i;
1920
1921 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001922 igb_set_rx_mode(netdev);
Andre Guedes05f9d3e2017-10-16 18:01:28 -07001923 igb_setup_tx_mode(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001924
1925 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001926
Alexander Duyck85b430b2009-10-27 15:50:29 +00001927 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001928 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001929 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001930
Gangfeng Huang0e71def2016-07-06 13:22:54 +08001931 igb_nfc_filter_restore(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001932 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001933 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001934
1935 igb_rx_fifo_flush_82575(&adapter->hw);
1936
Alexander Duyckc493ea42009-03-20 00:16:50 +00001937 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001938 * at least 1 descriptor unused to make sure
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001939 * next_to_use != next_to_clean
1940 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001941 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001942 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001943 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001944 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001945}
1946
Nick Nunley88a268c2010-02-17 01:01:59 +00001947/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001948 * igb_power_up_link - Power up the phy/serdes link
1949 * @adapter: address of board private structure
Nick Nunley88a268c2010-02-17 01:01:59 +00001950 **/
1951void igb_power_up_link(struct igb_adapter *adapter)
1952{
Akeem G. Abodunrin76886592012-07-17 04:51:18 +00001953 igb_reset_phy(&adapter->hw);
1954
Nick Nunley88a268c2010-02-17 01:01:59 +00001955 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1956 igb_power_up_phy_copper(&adapter->hw);
1957 else
1958 igb_power_up_serdes_link_82575(&adapter->hw);
Todd Fujinakaaec653c2014-06-17 06:58:11 +00001959
1960 igb_setup_link(&adapter->hw);
Nick Nunley88a268c2010-02-17 01:01:59 +00001961}
1962
1963/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00001964 * igb_power_down_link - Power down the phy/serdes link
1965 * @adapter: address of board private structure
Nick Nunley88a268c2010-02-17 01:01:59 +00001966 */
1967static void igb_power_down_link(struct igb_adapter *adapter)
1968{
1969 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1970 igb_power_down_phy_copper_82575(&adapter->hw);
1971 else
1972 igb_shutdown_serdes_link_82575(&adapter->hw);
1973}
Auke Kok9d5c8242008-01-24 02:22:38 -08001974
1975/**
Carolyn Wyborny56cec242013-10-17 05:36:26 +00001976 * igb_check_swap_media - Detect and switch function for Media Auto Sense
1977 * @adapter: address of the board private structure
1978 **/
1979static void igb_check_swap_media(struct igb_adapter *adapter)
1980{
1981 struct e1000_hw *hw = &adapter->hw;
1982 u32 ctrl_ext, connsw;
1983 bool swap_now = false;
1984
1985 ctrl_ext = rd32(E1000_CTRL_EXT);
1986 connsw = rd32(E1000_CONNSW);
1987
1988	/* We need to live-swap if the current media is copper and we have
1989	 * fiber/serdes to go to.
1990	 */
1992 if ((hw->phy.media_type == e1000_media_type_copper) &&
1993 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
1994 swap_now = true;
1995 } else if (!(connsw & E1000_CONNSW_SERDESD)) {
1996 /* copper signal takes time to appear */
1997 if (adapter->copper_tries < 4) {
1998 adapter->copper_tries++;
1999 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2000 wr32(E1000_CONNSW, connsw);
2001 return;
2002 } else {
2003 adapter->copper_tries = 0;
2004 if ((connsw & E1000_CONNSW_PHYSD) &&
2005 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2006 swap_now = true;
2007 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2008 wr32(E1000_CONNSW, connsw);
2009 }
2010 }
2011 }
2012
2013 if (!swap_now)
2014 return;
2015
2016 switch (hw->phy.media_type) {
2017 case e1000_media_type_copper:
2018 netdev_info(adapter->netdev,
2019 "MAS: changing media to fiber/serdes\n");
2020 ctrl_ext |=
2021 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2022 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2023 adapter->copper_tries = 0;
2024 break;
2025 case e1000_media_type_internal_serdes:
2026 case e1000_media_type_fiber:
2027 netdev_info(adapter->netdev,
2028 "MAS: changing media to copper\n");
2029 ctrl_ext &=
2030 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2031 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2032 break;
2033 default:
2034 /* shouldn't get here during regular operation */
2035 netdev_err(adapter->netdev,
2036 "AMS: Invalid media type found, returning\n");
2037 break;
2038 }
2039 wr32(E1000_CTRL_EXT, ctrl_ext);
2040}
2041
2042/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002043 * igb_up - Open the interface and prepare it to handle traffic
2044 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002045 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08002046int igb_up(struct igb_adapter *adapter)
2047{
2048 struct e1000_hw *hw = &adapter->hw;
2049 int i;
2050
2051 /* hardware has been reset, we need to reload some things */
2052 igb_configure(adapter);
2053
2054 clear_bit(__IGB_DOWN, &adapter->state);
2055
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002056 for (i = 0; i < adapter->num_q_vectors; i++)
2057 napi_enable(&(adapter->q_vector[i]->napi));
2058
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00002059 if (adapter->flags & IGB_FLAG_HAS_MSIX)
Auke Kok9d5c8242008-01-24 02:22:38 -08002060 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00002061 else
2062 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002063
2064 /* Clear any pending interrupts. */
2065 rd32(E1000_ICR);
2066 igb_irq_enable(adapter);
2067
Alexander Duyckd4960302009-10-27 15:53:45 +00002068 /* notify VFs that reset has been completed */
2069 if (adapter->vfs_allocated_count) {
2070 u32 reg_data = rd32(E1000_CTRL_EXT);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00002071
Alexander Duyckd4960302009-10-27 15:53:45 +00002072 reg_data |= E1000_CTRL_EXT_PFRSTD;
2073 wr32(E1000_CTRL_EXT, reg_data);
2074 }
2075
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00002076 netif_tx_start_all_queues(adapter->netdev);
2077
Alexander Duyck25568a52009-10-27 23:49:59 +00002078 /* start the watchdog. */
2079 hw->mac.get_link_status = 1;
2080 schedule_work(&adapter->watchdog_task);
2081
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002082 if ((adapter->flags & IGB_FLAG_EEE) &&
2083 (!hw->dev_spec._82575.eee_disable))
2084 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2085
Auke Kok9d5c8242008-01-24 02:22:38 -08002086 return 0;
2087}
2088
2089void igb_down(struct igb_adapter *adapter)
2090{
Auke Kok9d5c8242008-01-24 02:22:38 -08002091 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00002092 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002093 u32 tctl, rctl;
2094 int i;
2095
2096 /* signal that we're down so the interrupt handler does not
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002097 * reschedule our watchdog timer
2098 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002099 set_bit(__IGB_DOWN, &adapter->state);
2100
2101 /* disable receives in the hardware */
2102 rctl = rd32(E1000_RCTL);
2103 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2104 /* flush and sleep below */
2105
Gangfeng Huang94221ae752017-05-27 09:17:53 +08002106 igb_nfc_filter_exit(adapter);
2107
Todd Fujinakaf28ea082015-03-20 17:41:53 -07002108 netif_carrier_off(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002109 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002110
2111 /* disable transmits in the hardware */
2112 tctl = rd32(E1000_TCTL);
2113 tctl &= ~E1000_TCTL_EN;
2114 wr32(E1000_TCTL, tctl);
2115 /* flush both disables and wait for them to finish */
2116 wrfl();
Carolyn Wyborny0d451e72014-04-11 01:46:40 +00002117 usleep_range(10000, 11000);
Auke Kok9d5c8242008-01-24 02:22:38 -08002118
Auke Kok9d5c8242008-01-24 02:22:38 -08002119 igb_irq_disable(adapter);
2120
Akeem G Abodunrinaa9b8cc2013-08-28 02:22:43 +00002121 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2122
Carolyn Wyborny41f149a2013-04-30 00:21:32 +00002123 for (i = 0; i < adapter->num_q_vectors; i++) {
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08002124 if (adapter->q_vector[i]) {
2125 napi_synchronize(&adapter->q_vector[i]->napi);
2126 napi_disable(&adapter->q_vector[i]->napi);
2127 }
Carolyn Wyborny41f149a2013-04-30 00:21:32 +00002128 }
2129
Auke Kok9d5c8242008-01-24 02:22:38 -08002130 del_timer_sync(&adapter->watchdog_timer);
2131 del_timer_sync(&adapter->phy_info_timer);
2132
Alexander Duyck04fe6352009-02-06 23:22:32 +00002133 /* record the stats before reset*/
Eric Dumazet12dcd862010-10-15 17:27:10 +00002134 spin_lock(&adapter->stats64_lock);
Benjamin Poirier81e3f642017-05-16 15:55:16 -07002135 igb_update_stats(adapter);
Eric Dumazet12dcd862010-10-15 17:27:10 +00002136 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00002137
Auke Kok9d5c8242008-01-24 02:22:38 -08002138 adapter->link_speed = 0;
2139 adapter->link_duplex = 0;
2140
Jeff Kirsher30236822008-06-24 17:01:15 -07002141 if (!pci_channel_offline(adapter->pdev))
2142 igb_reset(adapter);
Alexander Duyck16903ca2016-01-06 23:11:18 -08002143
2144 /* clear VLAN promisc flag so VFTA will be updated if necessary */
2145 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2146
Auke Kok9d5c8242008-01-24 02:22:38 -08002147 igb_clean_all_tx_rings(adapter);
2148 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00002149#ifdef CONFIG_IGB_DCA
2150
2151 /* since we reset the hardware DCA settings were cleared */
2152 igb_setup_dca(adapter);
2153#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08002154}
2155
2156void igb_reinit_locked(struct igb_adapter *adapter)
2157{
2158 WARN_ON(in_interrupt());
2159 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
Carolyn Wyborny0d451e72014-04-11 01:46:40 +00002160 usleep_range(1000, 2000);
Auke Kok9d5c8242008-01-24 02:22:38 -08002161 igb_down(adapter);
2162 igb_up(adapter);
2163 clear_bit(__IGB_RESETTING, &adapter->state);
2164}
2165
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002166/**
2167 * igb_enable_mas - Media Autosense re-enable after swap
2168 * @adapter: adapter struct
2169 **/
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002170static void igb_enable_mas(struct igb_adapter *adapter)
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002171{
2172 struct e1000_hw *hw = &adapter->hw;
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002173 u32 connsw = rd32(E1000_CONNSW);
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002174
2175 /* configure for SerDes media detect */
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002176 if ((hw->phy.media_type == e1000_media_type_copper) &&
2177 (!(connsw & E1000_CONNSW_SERDESD))) {
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002178 connsw |= E1000_CONNSW_ENRGSRC;
2179 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2180 wr32(E1000_CONNSW, connsw);
2181 wrfl();
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002182 }
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002183}
2184
Auke Kok9d5c8242008-01-24 02:22:38 -08002185void igb_reset(struct igb_adapter *adapter)
2186{
Alexander Duyck090b1792009-10-27 23:51:55 +00002187 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002188 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07002189 struct e1000_mac_info *mac = &hw->mac;
2190 struct e1000_fc_info *fc = &hw->fc;
Alexander Duyck45693bc2016-01-06 23:10:39 -08002191 u32 pba, hwm;
Auke Kok9d5c8242008-01-24 02:22:38 -08002192
2193	/* Repartition PBA for MTUs greater than 9k.
2194	 * CTRL.RST is required for this to take effect.
2195 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002196 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002197 case e1000_i350:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00002198 case e1000_i354:
Alexander Duyck55cac242009-11-19 12:42:21 +00002199 case e1000_82580:
2200 pba = rd32(E1000_RXPBS);
2201 pba = igb_rxpbs_adjust_82580(pba);
2202 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002203 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00002204 pba = rd32(E1000_RXPBS);
2205 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002206 break;
2207 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002208 case e1000_i210:
2209 case e1000_i211:
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00002210 default:
2211 pba = E1000_PBA_34K;
2212 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07002213 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002214
Alexander Duyck45693bc2016-01-06 23:10:39 -08002215 if (mac->type == e1000_82575) {
2216 u32 min_rx_space, min_tx_space, needed_tx_space;
2217
2218 /* write Rx PBA so that hardware can report correct Tx PBA */
Auke Kok9d5c8242008-01-24 02:22:38 -08002219 wr32(E1000_PBA, pba);
2220
2221 /* To maintain wire speed transmits, the Tx FIFO should be
2222 * large enough to accommodate two full transmit packets,
2223 * rounded up to the next 1KB and expressed in KB. Likewise,
2224 * the Rx FIFO should be large enough to accommodate at least
2225 * one full receive packet and is similarly rounded up and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002226 * expressed in KB.
2227 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002228 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2229
2230		/* The Tx FIFO also stores 16 bytes of information about the Tx
2231		 * packet, but we don't include the Ethernet FCS because hardware
2232		 * appends it. We only need to round up to the nearest 512 byte
2233		 * block count since the value we care about is 2 frames, not 1.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002234 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002235 min_tx_space = adapter->max_frame_size;
2236 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2237 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
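		/* Worked example (illustrative): for a 1522 byte max frame,
		 * 1522 + 16 - 4 = 1534 bytes per frame and
		 * DIV_ROUND_UP(1534, 512) = 3, i.e. 3 KB (3072 bytes) holds
		 * two such frames (3068 bytes).
		 */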
2238
2239 /* upper 16 bits has Tx packet buffer allocation size in KB */
2240 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
Auke Kok9d5c8242008-01-24 02:22:38 -08002241
2242 /* If current Tx allocation is less than the min Tx FIFO size,
2243 * and the min Tx FIFO size is less than the current Rx FIFO
Alexander Duyck45693bc2016-01-06 23:10:39 -08002244 * allocation, take space away from current Rx allocation.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002245 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002246 if (needed_tx_space < pba) {
2247 pba -= needed_tx_space;
Auke Kok9d5c8242008-01-24 02:22:38 -08002248
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002249 /* if short on Rx space, Rx wins and must trump Tx
2250 * adjustment
2251 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002252 if (pba < min_rx_space)
2253 pba = min_rx_space;
2254 }
Alexander Duyck45693bc2016-01-06 23:10:39 -08002255
2256 /* adjust PBA for jumbo frames */
Alexander Duyck2d064c02008-07-08 15:10:12 -07002257 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08002258 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002259
Alexander Duyck45693bc2016-01-06 23:10:39 -08002260 /* flow control settings
2261 * The high water mark must be low enough to fit one full frame
2262 * after transmitting the pause frame. As such we must have enough
2263 * space to allow for us to complete our current transmit and then
2264 * receive the frame that is in progress from the link partner.
2265 * Set it to:
2266 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002267 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08002268 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002269
Matthew Vickd48507f2012-11-08 04:03:58 +00002270 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
Alexander Duyckd405ea32009-12-23 13:21:27 +00002271 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08002272 fc->pause_time = 0xFFFF;
2273 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00002274 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08002275
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002276 /* disable receive for all VFs and wait one second */
2277 if (adapter->vfs_allocated_count) {
2278 int i;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00002279
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002280 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00002281 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002282
2283 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00002284 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08002285
2286 /* disable transmits and receives */
2287 wr32(E1000_VFRE, 0);
2288 wr32(E1000_VFTE, 0);
2289 }
2290
Auke Kok9d5c8242008-01-24 02:22:38 -08002291 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00002292 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002293 wr32(E1000_WUC, 0);
2294
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002295 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2296 /* need to resetup here after media swap */
2297 adapter->ei.get_invariants(hw);
2298 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2299 }
Todd Fujinaka8cfb8792015-05-02 00:39:03 -07002300 if ((mac->type == e1000_82575) &&
2301 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2302 igb_enable_mas(adapter);
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002303 }
Alexander Duyck330a6d62009-10-27 23:51:35 +00002304 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00002305 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002306
Yury Kylulin83c21332017-03-07 11:20:25 +03002307 /* RAR registers were cleared during init_hw, clear mac table */
2308 igb_flush_mac_table(adapter);
2309 __dev_uc_unsync(adapter->netdev, NULL);
2310
2311 /* Recover default RAR entry */
2312 igb_set_default_mac_filter(adapter);
2313
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002314 /* Flow control settings reset on hardware reset, so guarantee flow
Matthew Vicka27416b2012-04-18 02:57:44 +00002315 * control is off when forcing speed.
2316 */
2317 if (!hw->mac.autoneg)
2318 igb_force_mac_fc(hw);
2319
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00002320 igb_init_dmac(adapter, pba);
Carolyn Wybornye4288932012-12-07 03:01:42 +00002321#ifdef CONFIG_IGB_HWMON
2322 /* Re-initialize the thermal sensor on i350 devices. */
2323 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2324 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2325 /* If present, re-initialize the external thermal sensor
2326 * interface.
2327 */
2328 if (adapter->ets)
2329 mac->ops.init_thermal_sensor_thresh(hw);
2330 }
2331 }
2332#endif
Jeff Kirsherb9361362014-03-13 16:07:14 -07002333 /* Re-establish EEE setting */
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002334 if (hw->phy.media_type == e1000_media_type_copper) {
2335 switch (mac->type) {
2336 case e1000_i350:
2337 case e1000_i210:
2338 case e1000_i211:
Todd Fujinakac4c112f2014-08-29 06:43:13 +00002339 igb_set_eee_i350(hw, true, true);
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002340 break;
2341 case e1000_i354:
Todd Fujinakac4c112f2014-08-29 06:43:13 +00002342 igb_set_eee_i354(hw, true, true);
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00002343 break;
2344 default:
2345 break;
2346 }
2347 }
Nick Nunley88a268c2010-02-17 01:01:59 +00002348 if (!netif_running(adapter->netdev))
2349 igb_power_down_link(adapter);
2350
Auke Kok9d5c8242008-01-24 02:22:38 -08002351 igb_update_mng_vlan(adapter);
2352
2353 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2354 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2355
Matthew Vick1f6e8172012-08-18 07:26:33 +00002356 /* Re-enable PTP, where applicable. */
Jacob Keller4f3ce712016-05-24 13:56:29 -07002357 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2358 igb_ptp_reset(adapter);
Matthew Vick1f6e8172012-08-18 07:26:33 +00002359
Alexander Duyck330a6d62009-10-27 23:51:35 +00002360 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002361}
2362
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002363static netdev_features_t igb_fix_features(struct net_device *netdev,
2364 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002365{
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002366 /* Since there is no support for separate Rx/Tx vlan accel
2367	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002368 */
Patrick McHardyf6469682013-04-19 02:04:27 +00002369 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2370 features |= NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002371 else
Patrick McHardyf6469682013-04-19 02:04:27 +00002372 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002373
2374 return features;
2375}
2376
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002377static int igb_set_features(struct net_device *netdev,
2378 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00002379{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002380 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00002381 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00002382
Patrick McHardyf6469682013-04-19 02:04:27 +00002383 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002384 igb_vlan_mode(netdev, features);
2385
Alexander Duyck16903ca2016-01-06 23:11:18 -08002386 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
Ben Greear89eaefb2012-03-06 09:41:58 +00002387 return 0;
2388
Gangfeng Huang0e71def2016-07-06 13:22:54 +08002389 if (!(features & NETIF_F_NTUPLE)) {
2390 struct hlist_node *node2;
2391 struct igb_nfc_filter *rule;
2392
2393 spin_lock(&adapter->nfc_lock);
2394 hlist_for_each_entry_safe(rule, node2,
2395 &adapter->nfc_filter_list, nfc_node) {
2396 igb_erase_filter(adapter, rule);
2397 hlist_del(&rule->nfc_node);
2398 kfree(rule);
2399 }
2400 spin_unlock(&adapter->nfc_lock);
2401 adapter->nfc_filter_count = 0;
2402 }
2403
Ben Greear89eaefb2012-03-06 09:41:58 +00002404 netdev->features = features;
2405
2406 if (netif_running(netdev))
2407 igb_reinit_locked(adapter);
2408 else
2409 igb_reset(adapter);
2410
Michał Mirosławac52caa2011-06-08 08:38:01 +00002411 return 0;
2412}
2413
Alexander Duyck268f9d32016-01-06 23:11:34 -08002414static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2415 struct net_device *dev,
2416 const unsigned char *addr, u16 vid,
2417 u16 flags)
2418{
2419 /* guarantee we can provide a unique filter for the unicast address */
2420 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2421 struct igb_adapter *adapter = netdev_priv(dev);
Alexander Duyck268f9d32016-01-06 23:11:34 -08002422 int vfn = adapter->vfs_allocated_count;
Alexander Duyck268f9d32016-01-06 23:11:34 -08002423
Yury Kylulin83c21332017-03-07 11:20:25 +03002424 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
Alexander Duyck268f9d32016-01-06 23:11:34 -08002425 return -ENOMEM;
2426 }
2427
2428 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2429}
2430
Alexander Duycke10715d2016-04-14 17:19:38 -04002431#define IGB_MAX_MAC_HDR_LEN 127
2432#define IGB_MAX_NETWORK_HDR_LEN 511
2433
2434static netdev_features_t
2435igb_features_check(struct sk_buff *skb, struct net_device *dev,
2436 netdev_features_t features)
2437{
2438 unsigned int network_hdr_len, mac_hdr_len;
2439
2440 /* Make certain the headers can be described by a context descriptor */
2441 mac_hdr_len = skb_network_header(skb) - skb->data;
2442 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2443 return features & ~(NETIF_F_HW_CSUM |
2444 NETIF_F_SCTP_CRC |
2445 NETIF_F_HW_VLAN_CTAG_TX |
2446 NETIF_F_TSO |
2447 NETIF_F_TSO6);
2448
2449 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2450 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2451 return features & ~(NETIF_F_HW_CSUM |
2452 NETIF_F_SCTP_CRC |
2453 NETIF_F_TSO |
2454 NETIF_F_TSO6);
2455
2456 /* We can only support IPV4 TSO in tunnels if we can mangle the
2457 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2458 */
2459 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2460 features &= ~NETIF_F_TSO;
2461
2462 return features;
2463}
2464
Andre Guedes05f9d3e2017-10-16 18:01:28 -07002465static int igb_offload_cbs(struct igb_adapter *adapter,
2466 struct tc_cbs_qopt_offload *qopt)
2467{
2468 struct e1000_hw *hw = &adapter->hw;
2469 int err;
2470
2471 /* CBS offloading is only supported by i210 controller. */
2472 if (hw->mac.type != e1000_i210)
2473 return -EOPNOTSUPP;
2474
2475 /* CBS offloading is only supported by queue 0 and queue 1. */
2476 if (qopt->queue < 0 || qopt->queue > 1)
2477 return -EINVAL;
2478
2479 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2480 qopt->idleslope, qopt->sendslope,
2481 qopt->hicredit, qopt->locredit);
2482 if (err)
2483 return err;
2484
2485 if (is_fqtss_enabled(adapter)) {
2486 igb_configure_cbs(adapter, qopt->queue, qopt->enable,
2487 qopt->idleslope, qopt->sendslope,
2488 qopt->hicredit, qopt->locredit);
2489
2490 if (!is_any_cbs_enabled(adapter))
2491 enable_fqtss(adapter, false);
2492
2493 } else {
2494 enable_fqtss(adapter, true);
2495 }
2496
2497 return 0;
2498}
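/* Configuration sketch (illustrative interface and credit parameters):
 * CBS offload is requested from userspace through the tc CBS qdisc and
 * reaches this handler via ndo_setup_tc, e.g.
 *
 *	tc qdisc replace dev eth0 parent 6666:1 cbs idleslope 20000 \
 *		sendslope -980000 hicredit 30 locredit -1470 offload 1
 *
 * where parent 6666:1 maps to hardware queue 0 under an mqprio root.
 */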
2499
2500static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2501 void *type_data)
2502{
2503 struct igb_adapter *adapter = netdev_priv(dev);
2504
2505 switch (type) {
Nogah Frankel8521db42017-11-06 07:23:43 +01002506 case TC_SETUP_QDISC_CBS:
Andre Guedes05f9d3e2017-10-16 18:01:28 -07002507 return igb_offload_cbs(adapter, type_data);
2508
2509 default:
2510 return -EOPNOTSUPP;
2511 }
2512}
2513
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002514static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00002515 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002516 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00002517 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00002518 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00002519 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002520 .ndo_set_mac_address = igb_set_mac,
2521 .ndo_change_mtu = igb_change_mtu,
2522 .ndo_do_ioctl = igb_ioctl,
2523 .ndo_tx_timeout = igb_tx_timeout,
2524 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002525 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2526 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00002527 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2528 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04002529 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
Lior Levy70ea4782013-03-03 20:27:48 +00002530 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
Corinna Vinschen1b8b0622018-01-17 11:53:39 +01002531 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
Williams, Mitch A8151d292010-02-10 01:44:24 +00002532 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002533#ifdef CONFIG_NET_POLL_CONTROLLER
2534 .ndo_poll_controller = igb_netpoll,
2535#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002536 .ndo_fix_features = igb_fix_features,
2537 .ndo_set_features = igb_set_features,
Alexander Duyck268f9d32016-01-06 23:11:34 -08002538 .ndo_fdb_add = igb_ndo_fdb_add,
Alexander Duycke10715d2016-04-14 17:19:38 -04002539 .ndo_features_check = igb_features_check,
Andre Guedes05f9d3e2017-10-16 18:01:28 -07002540 .ndo_setup_tc = igb_setup_tc,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002541};
2542
Taku Izumi42bfd33a2008-06-20 12:10:30 +09002543/**
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002544 * igb_set_fw_version - Configure version string for ethtool
2545 * @adapter: adapter struct
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002546 **/
2547void igb_set_fw_version(struct igb_adapter *adapter)
2548{
2549 struct e1000_hw *hw = &adapter->hw;
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00002550 struct e1000_fw_version fw;
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002551
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00002552 igb_get_fw_version(hw, &fw);
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002553
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00002554 switch (hw->mac.type) {
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00002555 case e1000_i210:
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00002556 case e1000_i211:
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00002557 if (!(igb_get_flash_presence_i210(hw))) {
2558 snprintf(adapter->fw_version,
2559 sizeof(adapter->fw_version),
2560 "%2d.%2d-%d",
2561 fw.invm_major, fw.invm_minor,
2562 fw.invm_img_type);
2563 break;
2564 }
2565 /* fall through */
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00002566 default:
 2567 /* if option rom is valid, display its version too */
2568 if (fw.or_valid) {
2569 snprintf(adapter->fw_version,
2570 sizeof(adapter->fw_version),
2571 "%d.%d, 0x%08x, %d.%d.%d",
2572 fw.eep_major, fw.eep_minor, fw.etrack_id,
2573 fw.or_major, fw.or_build, fw.or_patch);
2574 /* no option rom */
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00002575 } else if (fw.etrack_id != 0x0000) {
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00002576 snprintf(adapter->fw_version,
Carolyn Wyborny7dc98a62013-07-16 19:25:33 +00002577 sizeof(adapter->fw_version),
2578 "%d.%d, 0x%08x",
2579 fw.eep_major, fw.eep_minor, fw.etrack_id);
2580 } else {
2581 snprintf(adapter->fw_version,
2582 sizeof(adapter->fw_version),
2583 "%d.%d.%d",
2584 fw.eep_major, fw.eep_minor, fw.eep_build);
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002585 }
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00002586 break;
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002587 }
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002588}
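/* The resulting string surfaces through ethtool -i; depending on the NVM
 * contents it takes one of the shapes below (illustrative values only):
 *
 *	"1.63, 0x800009fa, 1.949.0"	EEPROM with option ROM
 *	"1.63, 0x800009fa"		EEPROM with etrack id only
 *	"1.63.0"			EEPROM only
 *	" 3.25-0"			i210/i211 iNVM image
 */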
2589
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002590/**
Carolyn Wyborny56cec242013-10-17 05:36:26 +00002591 * igb_init_mas - init Media Autosense feature if enabled in the NVM
2592 *
2593 * @adapter: adapter struct
2594 **/
2595static void igb_init_mas(struct igb_adapter *adapter)
2596{
2597 struct e1000_hw *hw = &adapter->hw;
2598 u16 eeprom_data;
2599
2600 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2601 switch (hw->bus.func) {
2602 case E1000_FUNC_0:
2603 if (eeprom_data & IGB_MAS_ENABLE_0) {
2604 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2605 netdev_info(adapter->netdev,
2606 "MAS: Enabling Media Autosense for port %d\n",
2607 hw->bus.func);
2608 }
2609 break;
2610 case E1000_FUNC_1:
2611 if (eeprom_data & IGB_MAS_ENABLE_1) {
2612 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2613 netdev_info(adapter->netdev,
2614 "MAS: Enabling Media Autosense for port %d\n",
2615 hw->bus.func);
2616 }
2617 break;
2618 case E1000_FUNC_2:
2619 if (eeprom_data & IGB_MAS_ENABLE_2) {
2620 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2621 netdev_info(adapter->netdev,
2622 "MAS: Enabling Media Autosense for port %d\n",
2623 hw->bus.func);
2624 }
2625 break;
2626 case E1000_FUNC_3:
2627 if (eeprom_data & IGB_MAS_ENABLE_3) {
2628 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2629 netdev_info(adapter->netdev,
2630 "MAS: Enabling Media Autosense for port %d\n",
2631 hw->bus.func);
2632 }
2633 break;
2634 default:
2635 /* Shouldn't get here */
2636 netdev_err(adapter->netdev,
2637 "MAS: Invalid port configuration, returning\n");
2638 break;
2639 }
2640}
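/* The four cases above differ only in the flag bit tested; a table-driven
 * equivalent (sketch only, not applied here) would be:
 *
 *	static const u16 mas_bit[] = { IGB_MAS_ENABLE_0, IGB_MAS_ENABLE_1,
 *				       IGB_MAS_ENABLE_2, IGB_MAS_ENABLE_3 };
 *
 *	if (hw->bus.func < ARRAY_SIZE(mas_bit) &&
 *	    (eeprom_data & mas_bit[hw->bus.func]))
 *		adapter->flags |= IGB_FLAG_MAS_ENABLE;
 */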
2641
2642/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002643 * igb_init_i2c - Init I2C interface
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002644 * @adapter: pointer to adapter structure
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002645 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002646static s32 igb_init_i2c(struct igb_adapter *adapter)
2647{
Todd Fujinaka23d87822014-06-04 07:12:15 +00002648 s32 status = 0;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002649
 2650 /* The I2C interface is only supported on i350 devices */
2651 if (adapter->hw.mac.type != e1000_i350)
Todd Fujinaka23d87822014-06-04 07:12:15 +00002652 return 0;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002653
2654 /* Initialize the i2c bus which is controlled by the registers.
 2655 * This bus will use the i2c_algo_bit structure that implements
2656 * the protocol through toggling of the 4 bits in the register.
2657 */
2658 adapter->i2c_adap.owner = THIS_MODULE;
2659 adapter->i2c_algo = igb_i2c_algo;
2660 adapter->i2c_algo.data = adapter;
2661 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2662 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2663 strlcpy(adapter->i2c_adap.name, "igb BB",
2664 sizeof(adapter->i2c_adap.name));
2665 status = i2c_bit_add_bus(&adapter->i2c_adap);
2666 return status;
2667}
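/* For reference, igb_i2c_algo is a struct i2c_algo_bit_data whose
 * setsda/setscl/getsda/getscl callbacks toggle and sample the SDA/SCL
 * bits of the hardware I2C register; the i2c-algo-bit core then drives
 * the wire protocol in software on top of those four primitives.
 */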
2668
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002669/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002670 * igb_probe - Device Initialization Routine
2671 * @pdev: PCI device information struct
2672 * @ent: entry in igb_pci_tbl
Auke Kok9d5c8242008-01-24 02:22:38 -08002673 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002674 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08002675 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002676 * igb_probe initializes an adapter identified by a pci_dev structure.
2677 * The OS initialization, configuring of the adapter private structure,
2678 * and a hardware reset occur.
Auke Kok9d5c8242008-01-24 02:22:38 -08002679 **/
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00002680static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Auke Kok9d5c8242008-01-24 02:22:38 -08002681{
2682 struct net_device *netdev;
2683 struct igb_adapter *adapter;
2684 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00002685 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002686 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00002687 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08002688 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
David S. Miller2d6a5e92009-03-17 15:01:30 -07002689 int err, pci_using_dac;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002690 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08002691
Andy Gospodarekbded64a2010-07-21 06:40:31 +00002692 /* Catch broken hardware that put the wrong VF device ID in
2693 * the PCIe SR-IOV capability.
2694 */
2695 if (pdev->is_virtfn) {
2696 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002697 pci_name(pdev), pdev->vendor, pdev->device);
Andy Gospodarekbded64a2010-07-21 06:40:31 +00002698 return -EINVAL;
2699 }
2700
Alexander Duyckaed5dec2009-02-06 23:16:04 +00002701 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002702 if (err)
2703 return err;
2704
2705 pci_using_dac = 0;
Russell Kingdc4ff9b2013-06-10 12:24:50 +01002706 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08002707 if (!err) {
Russell Kingdc4ff9b2013-06-10 12:24:50 +01002708 pci_using_dac = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08002709 } else {
Russell Kingdc4ff9b2013-06-10 12:24:50 +01002710 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08002711 if (err) {
Russell Kingdc4ff9b2013-06-10 12:24:50 +01002712 dev_err(&pdev->dev,
2713 "No usable DMA configuration, aborting\n");
2714 goto err_dma;
Auke Kok9d5c8242008-01-24 02:22:38 -08002715 }
2716 }
2717
Johannes Thumshirn56d766d2016-06-07 09:44:05 +02002718 err = pci_request_mem_regions(pdev, igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08002719 if (err)
2720 goto err_pci_reg;
2721
Frans Pop19d5afd2009-10-02 10:04:12 -07002722 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002723
Auke Kok9d5c8242008-01-24 02:22:38 -08002724 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07002725 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002726
2727 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08002728 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00002729 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08002730 if (!netdev)
2731 goto err_alloc_etherdev;
2732
2733 SET_NETDEV_DEV(netdev, &pdev->dev);
2734
2735 pci_set_drvdata(pdev, netdev);
2736 adapter = netdev_priv(netdev);
2737 adapter->netdev = netdev;
2738 adapter->pdev = pdev;
2739 hw = &adapter->hw;
2740 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00002741 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002742
Auke Kok9d5c8242008-01-24 02:22:38 -08002743 err = -EIO;
Jarod Wilson73bf8042015-09-10 15:37:50 -04002744 adapter->io_addr = pci_iomap(pdev, 0, 0);
2745 if (!adapter->io_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08002746 goto err_ioremap;
Jarod Wilson73bf8042015-09-10 15:37:50 -04002747 /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
2748 hw->hw_addr = adapter->io_addr;
Auke Kok9d5c8242008-01-24 02:22:38 -08002749
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002750 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08002751 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002752 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08002753
2754 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2755
Aaron Sierra89dbefb2013-10-31 00:32:34 +00002756 netdev->mem_start = pci_resource_start(pdev, 0);
2757 netdev->mem_end = pci_resource_end(pdev, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002758
Auke Kok9d5c8242008-01-24 02:22:38 -08002759 /* PCI config space info */
2760 hw->vendor_id = pdev->vendor;
2761 hw->device_id = pdev->device;
2762 hw->revision_id = pdev->revision;
2763 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2764 hw->subsystem_device_id = pdev->subsystem_device;
2765
Auke Kok9d5c8242008-01-24 02:22:38 -08002766 /* Copy the default MAC, PHY and NVM function pointers */
2767 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
2768 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
2769 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
2770 /* Initialize skew-specific constants */
2771 err = ei->get_invariants(hw);
2772 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00002773 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08002774
Alexander Duyck450c87c2009-02-06 23:22:11 +00002775 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08002776 err = igb_sw_init(adapter);
2777 if (err)
2778 goto err_sw_init;
2779
2780 igb_get_bus_info_pcie(hw);
2781
2782 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002783
2784 /* Copper options */
2785 if (hw->phy.media_type == e1000_media_type_copper) {
2786 hw->phy.mdix = AUTO_ALL_MODES;
2787 hw->phy.disable_polarity_correction = false;
2788 hw->phy.ms_type = e1000_ms_hw_default;
2789 }
2790
2791 if (igb_check_reset_block(hw))
2792 dev_info(&pdev->dev,
2793 "PHY reset is blocked due to SOL/IDER session.\n");
2794
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002795 /* features is initialized to 0 in allocation; it might have bits
Alexander Duyck077887c2011-08-26 07:46:29 +00002796 * set by igb_sw_init so we should use an or instead of an
2797 * assignment.
2798 */
2799 netdev->features |= NETIF_F_SG |
Alexander Duyck077887c2011-08-26 07:46:29 +00002800 NETIF_F_TSO |
2801 NETIF_F_TSO6 |
2802 NETIF_F_RXHASH |
2803 NETIF_F_RXCSUM |
Alexander Duycke10715d2016-04-14 17:19:38 -04002804 NETIF_F_HW_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002805
Alexander Duyck6e033702016-01-13 07:31:23 -08002806 if (hw->mac.type >= e1000_82576)
2807 netdev->features |= NETIF_F_SCTP_CRC;
2808
Alexander Duycke10715d2016-04-14 17:19:38 -04002809#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
2810 NETIF_F_GSO_GRE_CSUM | \
Tom Herbert7e133182016-05-18 09:06:10 -07002811 NETIF_F_GSO_IPXIP4 | \
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002812 NETIF_F_GSO_IPXIP6 | \
Alexander Duycke10715d2016-04-14 17:19:38 -04002813 NETIF_F_GSO_UDP_TUNNEL | \
2814 NETIF_F_GSO_UDP_TUNNEL_CSUM)
2815
2816 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
2817 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
2818
Alexander Duyck077887c2011-08-26 07:46:29 +00002819 /* copy netdev features into list of user selectable features */
Alexander Duycke10715d2016-04-14 17:19:38 -04002820 netdev->hw_features |= netdev->features |
2821 NETIF_F_HW_VLAN_CTAG_RX |
2822 NETIF_F_HW_VLAN_CTAG_TX |
2823 NETIF_F_RXALL;
Auke Kok9d5c8242008-01-24 02:22:38 -08002824
Alexander Duyck6e033702016-01-13 07:31:23 -08002825 if (hw->mac.type >= e1000_i350)
2826 netdev->hw_features |= NETIF_F_NTUPLE;
2827
Alexander Duycke10715d2016-04-14 17:19:38 -04002828 if (pci_using_dac)
2829 netdev->features |= NETIF_F_HIGHDMA;
Alexander Duyck077887c2011-08-26 07:46:29 +00002830
Alexander Duycke10715d2016-04-14 17:19:38 -04002831 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
Alexander Duyck6e033702016-01-13 07:31:23 -08002832 netdev->mpls_features |= NETIF_F_HW_CSUM;
Alexander Duycke10715d2016-04-14 17:19:38 -04002833 netdev->hw_enc_features |= netdev->vlan_features;
2834
2835 /* set this bit last since it cannot be part of vlan_features */
2836 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
2837 NETIF_F_HW_VLAN_CTAG_RX |
2838 NETIF_F_HW_VLAN_CTAG_TX;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002839
Ben Greear6b8f0922012-03-06 09:41:53 +00002840 netdev->priv_flags |= IFF_SUPP_NOFCS;
2841
Jiri Pirko01789342011-08-16 06:29:00 +00002842 netdev->priv_flags |= IFF_UNICAST_FLT;
2843
Jarod Wilson91c527a2016-10-17 15:54:05 -04002844 /* MTU range: 68 - 9216 */
2845 netdev->min_mtu = ETH_MIN_MTU;
2846 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
2847
Alexander Duyck330a6d62009-10-27 23:51:35 +00002848 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002849
2850 /* before reading the NVM, reset the controller to put the device in a
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002851 * known good starting state
2852 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002853 hw->mac.ops.reset_hw(hw);
2854
Carolyn Wybornyef3a0092013-07-17 19:02:53 +00002855 /* make sure the NVM is good; i211/i210 parts can have special NVM
2856 * that doesn't contain a checksum
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002857 */
Carolyn Wybornyef3a0092013-07-17 19:02:53 +00002858 switch (hw->mac.type) {
2859 case e1000_i210:
2860 case e1000_i211:
2861 if (igb_get_flash_presence_i210(hw)) {
2862 if (hw->nvm.ops.validate(hw) < 0) {
2863 dev_err(&pdev->dev,
2864 "The NVM Checksum Is Not Valid\n");
2865 err = -EIO;
2866 goto err_eeprom;
2867 }
2868 }
2869 break;
2870 default:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002871 if (hw->nvm.ops.validate(hw) < 0) {
2872 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2873 err = -EIO;
2874 goto err_eeprom;
2875 }
Carolyn Wybornyef3a0092013-07-17 19:02:53 +00002876 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002877 }
2878
John Holland806ffb12016-02-18 12:10:52 +01002879 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
2880 /* copy the MAC address out of the NVM */
2881 if (hw->mac.ops.read_mac_addr(hw))
2882 dev_err(&pdev->dev, "NVM Read Error\n");
2883 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002884
2885 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002886
Jiri Pirkoaaeb6cd2013-01-08 01:38:26 +00002887 if (!is_valid_ether_addr(netdev->dev_addr)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002888 dev_err(&pdev->dev, "Invalid MAC Address\n");
2889 err = -EIO;
2890 goto err_eeprom;
2891 }
2892
Yury Kylulin83c21332017-03-07 11:20:25 +03002893 igb_set_default_mac_filter(adapter);
2894
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002895 /* get firmware version for ethtool -i */
2896 igb_set_fw_version(adapter);
2897
Todd Fujinaka27dff8b2014-05-29 05:47:26 +00002898 /* configure RXPBSIZE and TXPBSIZE */
2899 if (hw->mac.type == e1000_i210) {
2900 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
2901 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
2902 }
2903
Kees Cook26566ea2017-10-16 17:29:35 -07002904 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
2905 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002906
2907 INIT_WORK(&adapter->reset_task, igb_reset_task);
2908 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2909
Alexander Duyck450c87c2009-02-06 23:22:11 +00002910 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002911 adapter->fc_autoneg = true;
2912 hw->mac.autoneg = true;
2913 hw->phy.autoneg_advertised = 0x2f;
2914
Alexander Duyck0cce1192009-07-23 18:10:24 +00002915 hw->fc.requested_mode = e1000_fc_default;
2916 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002917
Auke Kok9d5c8242008-01-24 02:22:38 -08002918 igb_validate_mdi_setting(hw);
2919
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002920 /* By default, support wake on port A */
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002921 if (hw->bus.func == 0)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002922 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2923
2924 /* Check the NVM for wake support on non-port A ports */
2925 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002926 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002927 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2928 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002929 else if (hw->bus.func == 1)
2930 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002931
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002932 if (eeprom_data & IGB_EEPROM_APME)
2933 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002934
2935 /* now that we have the eeprom settings, apply the special cases where
2936 * the eeprom may be wrong or the board simply won't support wake on
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002937 * lan on a particular port
2938 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002939 switch (pdev->device) {
2940 case E1000_DEV_ID_82575GB_QUAD_COPPER:
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002941 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002942 break;
2943 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002944 case E1000_DEV_ID_82576_FIBER:
2945 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002946 /* Wake events only supported on port A for dual fiber
Jeff Kirsherb980ac12013-02-23 07:29:56 +00002947 * regardless of eeprom setting
2948 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002949 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002950 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002951 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002952 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002953 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002954 /* if quad port adapter, disable WoL on all but port A */
2955 if (global_quad_port_a != 0)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002956 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002957 else
2958 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2959 /* Reset for multiple quad port adapters */
2960 if (++global_quad_port_a == 4)
2961 global_quad_port_a = 0;
2962 break;
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002963 default:
2964 /* If the device can't wake, don't set software support */
2965 if (!device_can_wakeup(&adapter->pdev->dev))
2966 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002967 }
2968
2969 /* initialize the wol settings based on the eeprom settings */
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002970 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2971 adapter->wol |= E1000_WUFC_MAG;
2972
2973 /* Some vendors want WoL disabled by default, but still supported */
2974 if ((hw->mac.type == e1000_i350) &&
2975 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2976 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2977 adapter->wol = 0;
2978 }
2979
Todd Fujinaka5e350b92016-01-05 10:08:28 -08002980 /* Some vendors want the ability to use the EEPROM setting as
2981 * enable/disable only, and not for capability
2982 */
2983 if (((hw->mac.type == e1000_i350) ||
2984 (hw->mac.type == e1000_i354)) &&
2985 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
2986 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2987 adapter->wol = 0;
2988 }
2989 if (hw->mac.type == e1000_i350) {
2990 if (((pdev->subsystem_device == 0x5001) ||
2991 (pdev->subsystem_device == 0x5002)) &&
2992 (hw->bus.func == 0)) {
2993 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2994 adapter->wol = 0;
2995 }
2996 if (pdev->subsystem_device == 0x1F52)
2997 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2998 }
2999
Matthew Vick63d4a8f2012-11-09 05:49:54 +00003000 device_set_wakeup_enable(&adapter->pdev->dev,
3001 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
Auke Kok9d5c8242008-01-24 02:22:38 -08003002
3003 /* reset the hardware with the new settings */
3004 igb_reset(adapter);
3005
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00003006 /* Init the I2C interface */
3007 err = igb_init_i2c(adapter);
3008 if (err) {
3009 dev_err(&pdev->dev, "failed to init i2c interface\n");
3010 goto err_eeprom;
3011 }
3012
Auke Kok9d5c8242008-01-24 02:22:38 -08003013 /* let the f/w know that the h/w is now under the control of the
Carolyn Wybornye52c0f92014-04-11 01:46:06 +00003014 * driver.
3015 */
Auke Kok9d5c8242008-01-24 02:22:38 -08003016 igb_get_hw_control(adapter);
3017
Auke Kok9d5c8242008-01-24 02:22:38 -08003018 strcpy(netdev->name, "eth%d");
3019 err = register_netdev(netdev);
3020 if (err)
3021 goto err_register;
3022
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00003023 /* carrier off reporting is important to ethtool even BEFORE open */
3024 netif_carrier_off(netdev);
3025
Jeff Kirsher421e02f2008-10-17 11:08:31 -07003026#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08003027 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003028 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003029 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003030 igb_setup_dca(adapter);
3031 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00003032
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003033#endif
Carolyn Wybornye4288932012-12-07 03:01:42 +00003034#ifdef CONFIG_IGB_HWMON
3035 /* Initialize the thermal sensor on i350 devices. */
3036 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3037 u16 ets_word;
Matthew Vick3c89f6d2012-08-10 05:40:43 +00003038
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003039 /* Read the NVM to determine if this i350 device supports an
Carolyn Wybornye4288932012-12-07 03:01:42 +00003040 * external thermal sensor.
3041 */
3042 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3043 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3044 adapter->ets = true;
3045 else
3046 adapter->ets = false;
3047 if (igb_sysfs_init(adapter))
3048 dev_err(&pdev->dev,
3049 "failed to allocate sysfs resources\n");
3050 } else {
3051 adapter->ets = false;
3052 }
3053#endif
Carolyn Wyborny56cec242013-10-17 05:36:26 +00003054 /* Check if Media Autosense is enabled */
3055 adapter->ei = *ei;
3056 if (hw->dev_spec._82575.mas_capable)
3057 igb_init_mas(adapter);
3058
Anders Berggren673b8b72011-02-04 07:32:32 +00003059 /* do hw tstamp init after resetting */
Richard Cochran7ebae812012-03-16 10:55:37 +00003060 igb_ptp_init(adapter);
Anders Berggren673b8b72011-02-04 07:32:32 +00003061
Auke Kok9d5c8242008-01-24 02:22:38 -08003062 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00003063 /* print bus type/speed/width info, not applicable to i354 */
3064 if (hw->mac.type != e1000_i354) {
3065 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3066 netdev->name,
3067 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3068 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3069 "unknown"),
3070 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3071 "Width x4" :
3072 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3073 "Width x2" :
3074 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3075 "Width x1" : "unknown"), netdev->dev_addr);
3076 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003077
Todd Fujinaka53ea6c72013-08-23 07:49:00 +00003078 if ((hw->mac.type >= e1000_i210 ||
3079 igb_get_flash_presence_i210(hw))) {
3080 ret_val = igb_read_part_string(hw, part_str,
3081 E1000_PBANUM_LENGTH);
3082 } else {
3083 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3084 }
3085
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00003086 if (ret_val)
3087 strcpy(part_str, "Unknown");
3088 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08003089 dev_info(&pdev->dev,
3090 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00003091 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003092 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08003093 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00003094 if (hw->phy.media_type == e1000_media_type_copper) {
3095 switch (hw->mac.type) {
3096 case e1000_i350:
3097 case e1000_i210:
3098 case e1000_i211:
3099 /* Enable EEE for internal copper PHY devices */
Todd Fujinakac4c112f2014-08-29 06:43:13 +00003100 err = igb_set_eee_i350(hw, true, true);
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00003101 if ((!err) &&
3102 (!hw->dev_spec._82575.eee_disable)) {
3103 adapter->eee_advert =
3104 MDIO_EEE_100TX | MDIO_EEE_1000T;
3105 adapter->flags |= IGB_FLAG_EEE;
3106 }
3107 break;
3108 case e1000_i354:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00003109 if ((rd32(E1000_CTRL_EXT) &
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00003110 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
Todd Fujinakac4c112f2014-08-29 06:43:13 +00003111 err = igb_set_eee_i354(hw, true, true);
Carolyn Wybornyf4c01e92014-03-12 03:58:22 +00003112 if ((!err) &&
3113 (!hw->dev_spec._82575.eee_disable)) {
3114 adapter->eee_advert =
3115 MDIO_EEE_100TX | MDIO_EEE_1000T;
3116 adapter->flags |= IGB_FLAG_EEE;
3117 }
3118 }
3119 break;
3120 default:
3121 break;
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00003122 }
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08003123 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003124 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003125 return 0;
3126
3127err_register:
3128 igb_release_hw_control(adapter);
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00003129 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
Auke Kok9d5c8242008-01-24 02:22:38 -08003130err_eeprom:
3131 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003132 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003133
3134 if (hw->flash_address)
3135 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08003136err_sw_init:
Yury Kylulin83c21332017-03-07 11:20:25 +03003137 kfree(adapter->mac_table);
Jia-Ju Bai42ad1a02015-08-05 22:05:16 +08003138 kfree(adapter->shadow_vfta);
Alexander Duyck047e0032009-10-27 15:49:27 +00003139 igb_clear_interrupt_scheme(adapter);
Todd Fujinakaceee3452015-08-07 17:27:39 -07003140#ifdef CONFIG_PCI_IOV
3141 igb_disable_sriov(pdev);
3142#endif
Jarod Wilson73bf8042015-09-10 15:37:50 -04003143 pci_iounmap(pdev, adapter->io_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003144err_ioremap:
3145 free_netdev(netdev);
3146err_alloc_etherdev:
Johannes Thumshirn56d766d2016-06-07 09:44:05 +02003147 pci_release_mem_regions(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003148err_pci_reg:
3149err_dma:
3150 pci_disable_device(pdev);
3151 return err;
3152}
3153
Greg Rosefa44f2f2013-01-17 01:03:06 -08003154#ifdef CONFIG_PCI_IOV
Stefan Assmann781798a2013-09-24 05:18:39 +00003155static int igb_disable_sriov(struct pci_dev *pdev)
Greg Rosefa44f2f2013-01-17 01:03:06 -08003156{
3157 struct net_device *netdev = pci_get_drvdata(pdev);
3158 struct igb_adapter *adapter = netdev_priv(netdev);
3159 struct e1000_hw *hw = &adapter->hw;
3160
3161 /* reclaim resources allocated to VFs */
3162 if (adapter->vf_data) {
3163 /* disable iov and allow time for transactions to clear */
Alexander Duyckb09186d2013-03-26 00:03:26 +00003164 if (pci_vfs_assigned(pdev)) {
Greg Rosefa44f2f2013-01-17 01:03:06 -08003165 dev_warn(&pdev->dev,
3166 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3167 return -EPERM;
3168 } else {
3169 pci_disable_sriov(pdev);
3170 msleep(500);
3171 }
3172
Yury Kylulin4827cc32017-03-07 11:20:26 +03003173 kfree(adapter->vf_mac_list);
3174 adapter->vf_mac_list = NULL;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003175 kfree(adapter->vf_data);
3176 adapter->vf_data = NULL;
3177 adapter->vfs_allocated_count = 0;
3178 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3179 wrfl();
3180 msleep(100);
3181 dev_info(&pdev->dev, "IOV Disabled\n");
3182
3183 /* Re-enable DMA Coalescing flag since IOV is turned off */
3184 adapter->flags |= IGB_FLAG_DMAC;
3185 }
3186
3187 return 0;
3188}
3189
3190static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3191{
3192 struct net_device *netdev = pci_get_drvdata(pdev);
3193 struct igb_adapter *adapter = netdev_priv(netdev);
3194 int old_vfs = pci_num_vf(pdev);
Yury Kylulin4827cc32017-03-07 11:20:26 +03003195 struct vf_mac_filter *mac_list;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003196 int err = 0;
Yury Kylulin4827cc32017-03-07 11:20:26 +03003197 int num_vf_mac_filters, i;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003198
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00003199 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
Mitch A Williams50267192013-06-20 06:03:36 +00003200 err = -EPERM;
3201 goto out;
3202 }
Greg Rosefa44f2f2013-01-17 01:03:06 -08003203 if (!num_vfs)
3204 goto out;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003205
Stefan Assmann781798a2013-09-24 05:18:39 +00003206 if (old_vfs) {
3207 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3208 old_vfs, max_vfs);
3209 adapter->vfs_allocated_count = old_vfs;
3210 } else
3211 adapter->vfs_allocated_count = num_vfs;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003212
3213 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3214 sizeof(struct vf_data_storage), GFP_KERNEL);
3215
3216 /* if allocation failed then we do not support SR-IOV */
3217 if (!adapter->vf_data) {
3218 adapter->vfs_allocated_count = 0;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003219 err = -ENOMEM;
3220 goto out;
3221 }
3222
Yury Kylulin4827cc32017-03-07 11:20:26 +03003223 /* Due to the limited number of RAR entries, calculate the potential
3224 * number of MAC filters available for the VFs. Reserve entries
3225 * for PF default MAC, PF MAC filters and at least one RAR entry
3226 * for each VF for VF MAC.
3227 */
3228 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3229 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3230 adapter->vfs_allocated_count);
3231
3232 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3233 sizeof(struct vf_mac_filter),
3234 GFP_KERNEL);
3235
3236 mac_list = adapter->vf_mac_list;
3237 INIT_LIST_HEAD(&adapter->vf_macs.l);
3238
3239 if (adapter->vf_mac_list) {
3240 /* Initialize list of VF MAC filters */
3241 for (i = 0; i < num_vf_mac_filters; i++) {
3242 mac_list->vf = -1;
3243 mac_list->free = true;
3244 list_add(&mac_list->l, &adapter->vf_macs.l);
3245 mac_list++;
3246 }
3247 } else {
3248 /* If we could not allocate memory for the VF MAC filters
3249 * we can continue without this feature but warn user.
3250 */
3251 dev_err(&pdev->dev,
3252 "Unable to allocate memory for VF MAC filter list\n");
3253 }
3254
Stefan Assmann781798a2013-09-24 05:18:39 +00003255 /* only call pci_enable_sriov() if no VFs are allocated already */
3256 if (!old_vfs) {
3257 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3258 if (err)
3259 goto err_out;
3260 }
Greg Rosefa44f2f2013-01-17 01:03:06 -08003261 dev_info(&pdev->dev, "%d VFs allocated\n",
3262 adapter->vfs_allocated_count);
3263 for (i = 0; i < adapter->vfs_allocated_count; i++)
3264 igb_vf_configure(adapter, i);
3265
3266 /* DMA Coalescing is not supported in IOV mode. */
3267 adapter->flags &= ~IGB_FLAG_DMAC;
3268 goto out;
3269
3270err_out:
Yury Kylulin4827cc32017-03-07 11:20:26 +03003271 kfree(adapter->vf_mac_list);
3272 adapter->vf_mac_list = NULL;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003273 kfree(adapter->vf_data);
3274 adapter->vf_data = NULL;
3275 adapter->vfs_allocated_count = 0;
3276out:
3277 return err;
3278}
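/* Worked example (assuming a 24-entry RAR table as on 82576 and
 * IGB_PF_MAC_FILTERS_RESERVED == 3): with 7 VFs allocated,
 * num_vf_mac_filters = 24 - (1 + 3 + 7) = 13 shared VF MAC filter slots.
 */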
3279
3280#endif
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003281/**
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00003282 * igb_remove_i2c - Cleanup I2C interface
3283 * @adapter: pointer to adapter structure
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003284 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00003285static void igb_remove_i2c(struct igb_adapter *adapter)
3286{
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00003287 /* free the adapter bus structure */
3288 i2c_del_adapter(&adapter->i2c_adap);
3289}
3290
Auke Kok9d5c8242008-01-24 02:22:38 -08003291/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003292 * igb_remove - Device Removal Routine
3293 * @pdev: PCI device information struct
Auke Kok9d5c8242008-01-24 02:22:38 -08003294 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003295 * igb_remove is called by the PCI subsystem to alert the driver
 3296 * that it should release a PCI device. This could be caused by a
3297 * Hot-Plug event, or because the driver is going to be removed from
3298 * memory.
Auke Kok9d5c8242008-01-24 02:22:38 -08003299 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05003300static void igb_remove(struct pci_dev *pdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003301{
3302 struct net_device *netdev = pci_get_drvdata(pdev);
3303 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003304 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003305
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003306 pm_runtime_get_noresume(&pdev->dev);
Carolyn Wybornye4288932012-12-07 03:01:42 +00003307#ifdef CONFIG_IGB_HWMON
3308 igb_sysfs_exit(adapter);
3309#endif
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00003310 igb_remove_i2c(adapter);
Matthew Vicka79f4f82012-08-10 05:40:44 +00003311 igb_ptp_stop(adapter);
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003312 /* The watchdog timer may be rescheduled, so explicitly
Tejun Heo760141a2010-12-12 16:45:14 +01003313 * prevent it from being rescheduled.
3314 */
Auke Kok9d5c8242008-01-24 02:22:38 -08003315 set_bit(__IGB_DOWN, &adapter->state);
3316 del_timer_sync(&adapter->watchdog_timer);
3317 del_timer_sync(&adapter->phy_info_timer);
3318
Tejun Heo760141a2010-12-12 16:45:14 +01003319 cancel_work_sync(&adapter->reset_task);
3320 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003321
Jeff Kirsher421e02f2008-10-17 11:08:31 -07003322#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003323 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003324 dev_info(&pdev->dev, "DCA disabled\n");
3325 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003326 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08003327 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07003328 }
3329#endif
3330
Auke Kok9d5c8242008-01-24 02:22:38 -08003331 /* Release control of h/w to f/w. If f/w is AMT enabled, this
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003332 * would have already happened in close and is redundant.
3333 */
Auke Kok9d5c8242008-01-24 02:22:38 -08003334 igb_release_hw_control(adapter);
3335
Alexander Duyck37680112009-02-19 20:40:30 -08003336#ifdef CONFIG_PCI_IOV
Greg Rosefa44f2f2013-01-17 01:03:06 -08003337 igb_disable_sriov(pdev);
Alexander Duyck37680112009-02-19 20:40:30 -08003338#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00003339
Alex Williamsonc23d92b2015-07-29 14:38:15 -06003340 unregister_netdev(netdev);
3341
3342 igb_clear_interrupt_scheme(adapter);
3343
Jarod Wilson73bf8042015-09-10 15:37:50 -04003344 pci_iounmap(pdev, adapter->io_addr);
Alexander Duyck28b07592009-02-06 23:20:31 +00003345 if (hw->flash_address)
3346 iounmap(hw->flash_address);
Johannes Thumshirn56d766d2016-06-07 09:44:05 +02003347 pci_release_mem_regions(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003348
Yury Kylulin83c21332017-03-07 11:20:25 +03003349 kfree(adapter->mac_table);
Carolyn Wyborny1128c752011-10-14 00:13:49 +00003350 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08003351 free_netdev(netdev);
3352
Frans Pop19d5afd2009-10-02 10:04:12 -07003353 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08003354
Auke Kok9d5c8242008-01-24 02:22:38 -08003355 pci_disable_device(pdev);
3356}
3357
3358/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003359 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3360 * @adapter: board private structure to initialize
Alexander Duycka6b623e2009-10-27 23:47:53 +00003361 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003362 * This function initializes the VF-specific data storage and then attempts to
 3363 * allocate the VFs. The ordering matters because it is much
 3364 * more expensive time-wise to disable SR-IOV than it is to allocate and free
3365 * the memory for the VFs.
Alexander Duycka6b623e2009-10-27 23:47:53 +00003366 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05003367static void igb_probe_vfs(struct igb_adapter *adapter)
Alexander Duycka6b623e2009-10-27 23:47:53 +00003368{
3369#ifdef CONFIG_PCI_IOV
3370 struct pci_dev *pdev = adapter->pdev;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003371 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka6b623e2009-10-27 23:47:53 +00003372
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003373 /* Virtualization features not supported on i210 family. */
3374 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3375 return;
3376
Jan Beulichbe069982015-10-19 04:23:29 -06003377 /* Of the below we really only want the effect of getting
3378 * IGB_FLAG_HAS_MSIX set (if available), without which
3379 * igb_enable_sriov() has no effect.
3380 */
3381 igb_set_interrupt_capability(adapter, true);
3382 igb_reset_interrupt_capability(adapter);
3383
Greg Rosefa44f2f2013-01-17 01:03:06 -08003384 pci_sriov_set_totalvfs(pdev, 7);
Stefan Assmann6423fc32015-07-10 15:01:12 +02003385 igb_enable_sriov(pdev, max_vfs);
Alexander Duycka6b623e2009-10-27 23:47:53 +00003386
Alexander Duycka6b623e2009-10-27 23:47:53 +00003387#endif /* CONFIG_PCI_IOV */
3388}
3389
Zhang Shengju28cb2d12017-09-19 21:40:54 +08003390unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003391{
3392 struct e1000_hw *hw = &adapter->hw;
Zhang Shengju28cb2d12017-09-19 21:40:54 +08003393 unsigned int max_rss_queues;
Auke Kok9d5c8242008-01-24 02:22:38 -08003394
Matthew Vick374a5422012-05-18 04:54:58 +00003395 /* Determine the maximum number of RSS queues supported. */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003396 switch (hw->mac.type) {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003397 case e1000_i211:
Matthew Vick374a5422012-05-18 04:54:58 +00003398 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003399 break;
Matthew Vick374a5422012-05-18 04:54:58 +00003400 case e1000_82575:
3401 case e1000_i210:
3402 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3403 break;
3404 case e1000_i350:
3405 /* I350 cannot do RSS and SR-IOV at the same time */
3406 if (!!adapter->vfs_allocated_count) {
3407 max_rss_queues = 1;
3408 break;
3409 }
3410 /* fall through */
3411 case e1000_82576:
3412 if (!!adapter->vfs_allocated_count) {
3413 max_rss_queues = 2;
3414 break;
3415 }
3416 /* fall through */
3417 case e1000_82580:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00003418 case e1000_i354:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003419 default:
Matthew Vick374a5422012-05-18 04:54:58 +00003420 max_rss_queues = IGB_MAX_RX_QUEUES;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003421 break;
3422 }
Alexander Duycka99955f2009-11-12 18:37:19 +00003423
Zhang Shengju28cb2d12017-09-19 21:40:54 +08003424 return max_rss_queues;
3425}
3426
3427static void igb_init_queue_configuration(struct igb_adapter *adapter)
3428{
3429 u32 max_rss_queues;
3430
3431 max_rss_queues = igb_get_max_rss_queues(adapter);
Matthew Vick374a5422012-05-18 04:54:58 +00003432 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3433
Shota Suzuki72ddef02015-07-01 09:25:52 +09003434 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3435}
3436
3437void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3438 const u32 max_rss_queues)
3439{
3440 struct e1000_hw *hw = &adapter->hw;
3441
Matthew Vick374a5422012-05-18 04:54:58 +00003442 /* Determine if we need to pair queues. */
3443 switch (hw->mac.type) {
3444 case e1000_82575:
3445 case e1000_i211:
3446 /* Device supports enough interrupts without queue pairing. */
3447 break;
3448 case e1000_82576:
Matthew Vick374a5422012-05-18 04:54:58 +00003449 case e1000_82580:
3450 case e1000_i350:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00003451 case e1000_i354:
Matthew Vick374a5422012-05-18 04:54:58 +00003452 case e1000_i210:
3453 default:
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003454 /* If rss_queues > half of max_rss_queues, pair the queues in
Matthew Vick374a5422012-05-18 04:54:58 +00003455 * order to conserve interrupts due to limited supply.
3456 */
3457 if (adapter->rss_queues > (max_rss_queues / 2))
3458 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
Shota Suzuki37a5d162015-12-11 18:44:00 +09003459 else
3460 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
Matthew Vick374a5422012-05-18 04:54:58 +00003461 break;
3462 }
Greg Rosefa44f2f2013-01-17 01:03:06 -08003463}
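/* Worked example (hypothetical system): an 82576 with max_rss_queues = 8
 * on a 6-CPU machine gets rss_queues = min(8, 6) = 6; since 6 > 8 / 2,
 * IGB_FLAG_QUEUE_PAIRS is set and each Tx/Rx queue pair shares one
 * interrupt vector to stay within the MSI-X vector budget.
 */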
3464
3465/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003466 * igb_sw_init - Initialize general software structures (struct igb_adapter)
3467 * @adapter: board private structure to initialize
Greg Rosefa44f2f2013-01-17 01:03:06 -08003468 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003469 * igb_sw_init initializes the Adapter private data structure.
3470 * Fields are initialized based on PCI device information and
3471 * OS network device settings (MTU size).
Greg Rosefa44f2f2013-01-17 01:03:06 -08003472 **/
3473static int igb_sw_init(struct igb_adapter *adapter)
3474{
3475 struct e1000_hw *hw = &adapter->hw;
3476 struct net_device *netdev = adapter->netdev;
3477 struct pci_dev *pdev = adapter->pdev;
3478
3479 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3480
3481 /* set default ring sizes */
3482 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3483 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3484
3485 /* set default ITR values */
3486 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3487 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3488
3489 /* set default work limits */
3490 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3491
3492 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3493 VLAN_HLEN;
3494 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3495
Gangfeng Huang0e71def2016-07-06 13:22:54 +08003496 spin_lock_init(&adapter->nfc_lock);
Greg Rosefa44f2f2013-01-17 01:03:06 -08003497 spin_lock_init(&adapter->stats64_lock);
3498#ifdef CONFIG_PCI_IOV
3499 switch (hw->mac.type) {
3500 case e1000_82576:
3501 case e1000_i350:
3502 if (max_vfs > 7) {
3503 dev_warn(&pdev->dev,
3504 "Maximum of 7 VFs per PF, using max\n");
Alex Williamsond0f63ac2013-03-13 15:50:24 +00003505 max_vfs = adapter->vfs_allocated_count = 7;
Greg Rosefa44f2f2013-01-17 01:03:06 -08003506 } else
3507 adapter->vfs_allocated_count = max_vfs;
3508 if (adapter->vfs_allocated_count)
3509 dev_warn(&pdev->dev,
3510 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3511 break;
3512 default:
3513 break;
3514 }
3515#endif /* CONFIG_PCI_IOV */
3516
Stefan Assmanncbfe3602015-09-17 14:46:10 +02003517 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
3518 adapter->flags |= IGB_FLAG_HAS_MSIX;
3519
Yury Kylulin83c21332017-03-07 11:20:25 +03003520 adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
3521 hw->mac.rar_entry_count, GFP_ATOMIC);
3522 if (!adapter->mac_table)
3523 return -ENOMEM;
3524
Todd Fujinakaceee3452015-08-07 17:27:39 -07003525 igb_probe_vfs(adapter);
3526
Greg Rosefa44f2f2013-01-17 01:03:06 -08003527 igb_init_queue_configuration(adapter);
Alexander Duycka99955f2009-11-12 18:37:19 +00003528
Carolyn Wyborny1128c752011-10-14 00:13:49 +00003529 /* Setup and initialize a copy of the hw vlan table array */
Joe Perchesb2adaca2013-02-03 17:43:58 +00003530 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3531 GFP_ATOMIC);
Christophe JAILLET18eb8632017-08-27 08:39:51 +02003532 if (!adapter->shadow_vfta)
3533 return -ENOMEM;
Carolyn Wyborny1128c752011-10-14 00:13:49 +00003534
Alexander Duycka6b623e2009-10-27 23:47:53 +00003535 /* This call may decrease the number of queues */
Stefan Assmann53c7d062012-12-04 06:00:12 +00003536 if (igb_init_interrupt_scheme(adapter, true)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003537 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3538 return -ENOMEM;
3539 }
3540
3541 /* Explicitly disable IRQ since the NIC can be in any state. */
3542 igb_irq_disable(adapter);
3543
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003544 if (hw->mac.type >= e1000_i350)
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08003545 adapter->flags &= ~IGB_FLAG_DMAC;
3546
Auke Kok9d5c8242008-01-24 02:22:38 -08003547 set_bit(__IGB_DOWN, &adapter->state);
3548 return 0;
3549}
3550
3551/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003552 * igb_open - Called when a network interface is made active
3553 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003554 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003555 * Returns 0 on success, negative value on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003556 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003557 * The open entry point is called when a network interface is made
3558 * active by the system (IFF_UP). At this point all resources needed
3559 * for transmit and receive operations are allocated, the interrupt
3560 * handler is registered with the OS, the watchdog timer is started,
3561 * and the stack is notified that the interface is ready.
Auke Kok9d5c8242008-01-24 02:22:38 -08003562 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003563static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08003564{
3565 struct igb_adapter *adapter = netdev_priv(netdev);
3566 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003567 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003568 int err;
3569 int i;
3570
3571 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003572 if (test_bit(__IGB_TESTING, &adapter->state)) {
3573 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08003574 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003575 }
3576
3577 if (!resuming)
3578 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003579
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00003580 netif_carrier_off(netdev);
3581
Auke Kok9d5c8242008-01-24 02:22:38 -08003582 /* allocate transmit descriptors */
3583 err = igb_setup_all_tx_resources(adapter);
3584 if (err)
3585 goto err_setup_tx;
3586
3587 /* allocate receive descriptors */
3588 err = igb_setup_all_rx_resources(adapter);
3589 if (err)
3590 goto err_setup_rx;
3591
Nick Nunley88a268c2010-02-17 01:01:59 +00003592 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003593
Auke Kok9d5c8242008-01-24 02:22:38 -08003594 /* before we allocate an interrupt, we must be ready to handle it.
3595 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 3596 * as soon as we call pci_request_irq, so we have to set up our
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003597 * clean_rx handler before we do so.
3598 */
Auke Kok9d5c8242008-01-24 02:22:38 -08003599 igb_configure(adapter);
3600
3601 err = igb_request_irq(adapter);
3602 if (err)
3603 goto err_req_irq;
3604
Alexander Duyck0c2cc022012-09-25 00:31:22 +00003605 /* Notify the stack of the actual queue counts. */
3606 err = netif_set_real_num_tx_queues(adapter->netdev,
3607 adapter->num_tx_queues);
3608 if (err)
3609 goto err_set_queues;
3610
3611 err = netif_set_real_num_rx_queues(adapter->netdev,
3612 adapter->num_rx_queues);
3613 if (err)
3614 goto err_set_queues;
3615
Auke Kok9d5c8242008-01-24 02:22:38 -08003616 /* From here on the code is the same as igb_up() */
3617 clear_bit(__IGB_DOWN, &adapter->state);
3618
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00003619 for (i = 0; i < adapter->num_q_vectors; i++)
3620 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08003621
3622 /* Clear any pending interrupts. */
3623 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07003624
3625 igb_irq_enable(adapter);
3626
Alexander Duyckd4960302009-10-27 15:53:45 +00003627 /* notify VFs that reset has been completed */
3628 if (adapter->vfs_allocated_count) {
3629 u32 reg_data = rd32(E1000_CTRL_EXT);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00003630
Alexander Duyckd4960302009-10-27 15:53:45 +00003631 reg_data |= E1000_CTRL_EXT_PFRSTD;
3632 wr32(E1000_CTRL_EXT, reg_data);
3633 }
3634
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07003635 netif_tx_start_all_queues(netdev);
3636
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003637 if (!resuming)
3638 pm_runtime_put(&pdev->dev);
3639
Alexander Duyck25568a52009-10-27 23:49:59 +00003640 /* start the watchdog. */
3641 hw->mac.get_link_status = 1;
3642 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003643
3644 return 0;
3645
Alexander Duyck0c2cc022012-09-25 00:31:22 +00003646err_set_queues:
3647 igb_free_irq(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003648err_req_irq:
3649 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00003650 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003651 igb_free_all_rx_resources(adapter);
3652err_setup_rx:
3653 igb_free_all_tx_resources(adapter);
3654err_setup_tx:
3655 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003656 if (!resuming)
3657 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003658
3659 return err;
3660}
3661
Stefan Assmann46eafa52016-02-03 09:20:50 +01003662int igb_open(struct net_device *netdev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003663{
3664 return __igb_open(netdev, false);
3665}
3666
Auke Kok9d5c8242008-01-24 02:22:38 -08003667/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003668 * igb_close - Disables a network interface
3669 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003670 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003671 * Returns 0, this is not allowed to fail
Auke Kok9d5c8242008-01-24 02:22:38 -08003672 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003673 * The close entry point is called when an interface is de-activated
3674 * by the OS. The hardware is still under the driver's control, but
3675 * needs to be disabled. A global MAC reset is issued to stop the
3676 * hardware, and all transmit and receive resources are freed.
Auke Kok9d5c8242008-01-24 02:22:38 -08003677 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003678static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08003679{
3680 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003681 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003682
3683 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08003684
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003685 if (!suspending)
3686 pm_runtime_get_sync(&pdev->dev);
3687
3688 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003689 igb_free_irq(adapter);
3690
3691 igb_free_all_tx_resources(adapter);
3692 igb_free_all_rx_resources(adapter);
3693
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003694 if (!suspending)
3695 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003696 return 0;
3697}
3698
Stefan Assmann46eafa52016-02-03 09:20:50 +01003699int igb_close(struct net_device *netdev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003700{
Lyude Paul888f2292017-12-12 14:31:30 -05003701 if (netif_device_present(netdev) || netdev->dismantle)
Todd Fujinaka94749332016-11-15 08:54:26 -08003702 return __igb_close(netdev, false);
3703 return 0;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003704}
3705
Auke Kok9d5c8242008-01-24 02:22:38 -08003706/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003707 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
3708 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9d5c8242008-01-24 02:22:38 -08003709 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003710 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003711 **/
Alexander Duyck80785292009-10-27 15:51:47 +00003712int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003713{
Alexander Duyck59d71982010-04-27 13:09:25 +00003714 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003715 int size;
3716
Alexander Duyck06034642011-08-26 07:44:22 +00003717 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003718
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08003719 tx_ring->tx_buffer_info = vmalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00003720 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003721 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08003722
3723 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08003724 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08003725 tx_ring->size = ALIGN(tx_ring->size, 4096);
3726
Alexander Duyck5536d212012-09-25 00:31:17 +00003727 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
3728 &tx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08003729 if (!tx_ring->desc)
3730 goto err;
3731
Auke Kok9d5c8242008-01-24 02:22:38 -08003732 tx_ring->next_to_use = 0;
3733 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00003734
Auke Kok9d5c8242008-01-24 02:22:38 -08003735 return 0;
3736
3737err:
Alexander Duyck06034642011-08-26 07:44:22 +00003738 vfree(tx_ring->tx_buffer_info);
Alexander Duyckf33005a2012-09-13 06:27:55 +00003739 tx_ring->tx_buffer_info = NULL;
3740 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003741 return -ENOMEM;
3742}
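
/* Ring-sizing arithmetic, worked through assuming the driver default of
 * 256 descriptors and a 16-byte union e1000_adv_tx_desc:
 *
 *	tx_ring->size = 256 * 16;		// 4096 bytes
 *	tx_ring->size = ALIGN(4096, 4096);	// already one 4K page
 *
 * The smallest ring therefore occupies exactly one page of coherent DMA
 * memory; larger descriptor counts round the allocation up to whole
 * pages.
 */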
3743
3744/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003745 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
3746 * (Descriptors) for all queues
3747 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003748 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003749 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003750 **/
3751static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
3752{
Alexander Duyck439705e2009-10-27 23:49:20 +00003753 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003754 int i, err = 0;
3755
3756 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003757 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003758 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00003759 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08003760 "Allocation for Tx Queue %u failed\n", i);
3761 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00003762 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003763 break;
3764 }
3765 }
3766
3767 return err;
3768}
3769
3770/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003771 * igb_setup_tctl - configure the transmit control registers
3772 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003773 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003774void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003775{
Auke Kok9d5c8242008-01-24 02:22:38 -08003776 struct e1000_hw *hw = &adapter->hw;
3777 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08003778
Alexander Duyck85b430b2009-10-27 15:50:29 +00003779 /* disable queue 0 which is enabled by default on 82575 and 82576 */
3780 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003781
3782 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08003783 tctl = rd32(E1000_TCTL);
3784 tctl &= ~E1000_TCTL_CT;
3785 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
3786 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
3787
3788 igb_config_collision_dist(hw);
3789
Auke Kok9d5c8242008-01-24 02:22:38 -08003790 /* Enable transmits */
3791 tctl |= E1000_TCTL_EN;
3792
3793 wr32(E1000_TCTL, tctl);
3794}
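
/* A sketch of the TCTL value assembled above, assuming the stock e1000
 * definitions E1000_COLLISION_THRESHOLD == 15 and E1000_CT_SHIFT == 4:
 *
 *	tctl |= E1000_TCTL_PSP;		// pad short packets
 *	tctl |= E1000_TCTL_RTLC;	// retransmit on late collision
 *	tctl |= 15 << 4;		// collision threshold field
 *
 * The collision threshold only matters at half duplex; full-duplex
 * links never experience collisions.
 */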
3795
3796/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003797 * igb_configure_tx_ring - Configure transmit ring after Reset
3798 * @adapter: board private structure
3799 * @ring: tx ring to configure
Alexander Duyck85b430b2009-10-27 15:50:29 +00003800 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003801 * Configure a transmit ring after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00003802 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003803void igb_configure_tx_ring(struct igb_adapter *adapter,
Carolyn Wyborny9005df32014-04-11 01:45:34 +00003804 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003805{
3806 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00003807 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003808 u64 tdba = ring->dma;
3809 int reg_idx = ring->reg_idx;
3810
3811 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003812 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003813 wrfl();
3814 mdelay(10);
3815
3816 wr32(E1000_TDLEN(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003817 ring->count * sizeof(union e1000_adv_tx_desc));
Alexander Duyck85b430b2009-10-27 15:50:29 +00003818 wr32(E1000_TDBAL(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003819 tdba & 0x00000000ffffffffULL);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003820 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
3821
Cao jin629823b2016-11-08 15:06:20 +08003822 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003823 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003824 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003825
3826 txdctl |= IGB_TX_PTHRESH;
3827 txdctl |= IGB_TX_HTHRESH << 8;
3828 txdctl |= IGB_TX_WTHRESH << 16;
3829
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08003830 /* reinitialize tx_buffer_info */
3831 memset(ring->tx_buffer_info, 0,
3832 sizeof(struct igb_tx_buffer) * ring->count);
3833
Alexander Duyck85b430b2009-10-27 15:50:29 +00003834 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3835 wr32(E1000_TXDCTL(reg_idx), txdctl);
3836}
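
/* The base-address writes above split one 64-bit DMA address into two
 * 32-bit register halves, for example:
 *
 *	u64 tdba = 0x0000000123456000ULL;
 *	wr32(E1000_TDBAL(reg_idx), tdba & 0x00000000ffffffffULL); // 0x23456000
 *	wr32(E1000_TDBAH(reg_idx), tdba >> 32);			  // 0x00000001
 */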
3837
3838/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003839 * igb_configure_tx - Configure transmit Unit after Reset
3840 * @adapter: board private structure
Alexander Duyck85b430b2009-10-27 15:50:29 +00003841 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003842 * Configure the Tx unit of the MAC after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00003843 **/
3844static void igb_configure_tx(struct igb_adapter *adapter)
3845{
3846 int i;
3847
3848 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003849 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003850}
3851
3852/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003853 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
3854 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
Auke Kok9d5c8242008-01-24 02:22:38 -08003855 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003856 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003857 **/
Alexander Duyck80785292009-10-27 15:51:47 +00003858int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003859{
Alexander Duyck59d71982010-04-27 13:09:25 +00003860 struct device *dev = rx_ring->dev;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003861 int size;
Auke Kok9d5c8242008-01-24 02:22:38 -08003862
Alexander Duyck06034642011-08-26 07:44:22 +00003863 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003864
Alexander Duyckd2bead52017-02-06 18:25:50 -08003865 rx_ring->rx_buffer_info = vmalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00003866 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003867 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08003868
Auke Kok9d5c8242008-01-24 02:22:38 -08003869 /* Round up to nearest 4K */
Alexander Duyckf33005a2012-09-13 06:27:55 +00003870 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08003871 rx_ring->size = ALIGN(rx_ring->size, 4096);
3872
Alexander Duyck5536d212012-09-25 00:31:17 +00003873 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3874 &rx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08003875 if (!rx_ring->desc)
3876 goto err;
3877
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003878 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003879 rx_ring->next_to_clean = 0;
3880 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003881
Auke Kok9d5c8242008-01-24 02:22:38 -08003882 return 0;
3883
3884err:
Alexander Duyck06034642011-08-26 07:44:22 +00003885 vfree(rx_ring->rx_buffer_info);
3886 rx_ring->rx_buffer_info = NULL;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003887 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003888 return -ENOMEM;
3889}
3890
3891/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003892 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3893 * (Descriptors) for all queues
3894 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08003895 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003896 * Return 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08003897 **/
3898static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3899{
Alexander Duyck439705e2009-10-27 23:49:20 +00003900 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003901 int i, err = 0;
3902
3903 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003904 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003905 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00003906 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08003907 "Allocation for Rx Queue %u failed\n", i);
3908 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00003909 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003910 break;
3911 }
3912 }
3913
3914 return err;
3915}
3916
3917/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003918 * igb_setup_mrqc - configure the multiple receive queue control registers
3919 * @adapter: Board private structure
Alexander Duyck06cf2662009-10-27 15:53:25 +00003920 **/
3921static void igb_setup_mrqc(struct igb_adapter *adapter)
3922{
3923 struct e1000_hw *hw = &adapter->hw;
3924 u32 mrqc, rxcsum;
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003925 u32 j, num_rx_queues;
Eric Dumazeteb31f842014-11-16 06:23:14 -08003926 u32 rss_key[10];
Alexander Duyck06cf2662009-10-27 15:53:25 +00003927
Eric Dumazeteb31f842014-11-16 06:23:14 -08003928 netdev_rss_key_fill(rss_key, sizeof(rss_key));
Alexander Duycka57fe232012-09-13 06:28:16 +00003929 for (j = 0; j < 10; j++)
Eric Dumazeteb31f842014-11-16 06:23:14 -08003930 wr32(E1000_RSSRK(j), rss_key[j]);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003931
Alexander Duycka99955f2009-11-12 18:37:19 +00003932 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003933
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003934 switch (hw->mac.type) {
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003935 case e1000_82576:
3936 /* 82576 supports 2 RSS queues for SR-IOV */
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003937 if (adapter->vfs_allocated_count)
Alexander Duyck06cf2662009-10-27 15:53:25 +00003938 num_rx_queues = 2;
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003939 break;
3940 default:
3941 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003942 }
3943
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003944 if (adapter->rss_indir_tbl_init != num_rx_queues) {
3945 for (j = 0; j < IGB_RETA_SIZE; j++)
Carolyn Wybornyc502ea22014-04-11 01:46:33 +00003946 adapter->rss_indir_tbl[j] =
3947 (j * num_rx_queues) / IGB_RETA_SIZE;
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003948 adapter->rss_indir_tbl_init = num_rx_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003949 }
Laura Mihaela Vasilescued12cc92013-07-31 20:19:54 +00003950 igb_write_rss_indir_tbl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003951
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003952 /* Disable raw packet checksumming so that RSS hash is placed in
Alexander Duyck06cf2662009-10-27 15:53:25 +00003953 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3954 * offloads as they are enabled by default
3955 */
3956 rxcsum = rd32(E1000_RXCSUM);
3957 rxcsum |= E1000_RXCSUM_PCSD;
3958
3959 if (adapter->hw.mac.type >= e1000_82576)
3960 /* Enable Receive Checksum Offload for SCTP */
3961 rxcsum |= E1000_RXCSUM_CRCOFL;
3962
3963 /* Don't need to set TUOFL or IPOFL, they default to 1 */
3964 wr32(E1000_RXCSUM, rxcsum);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003965
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003966 /* Generate RSS hash based on packet types, TCP/UDP
3967 * port numbers and/or IPv4/v6 src and dst addresses
3968 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003969 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3970 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3971 E1000_MRQC_RSS_FIELD_IPV6 |
3972 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3973 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003974
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003975 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3976 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3977 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3978 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3979
Alexander Duyck06cf2662009-10-27 15:53:25 +00003980 /* If VMDq is enabled then we set the appropriate mode for that, else
3981 * we default to RSS so that an RSS hash is calculated per packet even
Jeff Kirsherb980ac12013-02-23 07:29:56 +00003982 * if we are only using one queue
3983 */
Alexander Duyck06cf2662009-10-27 15:53:25 +00003984 if (adapter->vfs_allocated_count) {
3985 if (hw->mac.type > e1000_82575) {
3986 /* Set the default pool for the PF's first queue */
3987 u32 vtctl = rd32(E1000_VT_CTL);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00003988
Alexander Duyck06cf2662009-10-27 15:53:25 +00003989 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3990 E1000_VT_CTL_DISABLE_DEF_POOL);
3991 vtctl |= adapter->vfs_allocated_count <<
3992 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3993 wr32(E1000_VT_CTL, vtctl);
3994 }
Alexander Duycka99955f2009-11-12 18:37:19 +00003995 if (adapter->rss_queues > 1)
Todd Fujinakac883de92016-01-11 09:34:50 -08003996 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003997 else
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003998 mrqc |= E1000_MRQC_ENABLE_VMDQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003999 } else {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00004000 if (hw->mac.type != e1000_i211)
Todd Fujinakac883de92016-01-11 09:34:50 -08004001 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00004002 }
4003 igb_vmm_control(adapter);
4004
Alexander Duyck06cf2662009-10-27 15:53:25 +00004005 wr32(E1000_MRQC, mrqc);
4006}
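
/* Worked example of the default indirection-table fill above, assuming
 * IGB_RETA_SIZE == 128 and four RSS queues:
 *
 *	adapter->rss_indir_tbl[j] = (j * 4) / 128;	// equals j / 32
 *
 * Entries 0..31 steer to queue 0, 32..63 to queue 1, 64..95 to queue 2
 * and 96..127 to queue 3, an even spread across the table.
 */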
4007
4008/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004009 * igb_setup_rctl - configure the receive control registers
4010 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004011 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00004012void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08004013{
4014 struct e1000_hw *hw = &adapter->hw;
4015 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08004016
4017 rctl = rd32(E1000_RCTL);
4018
4019 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08004020 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004021
Alexander Duyck69d728b2008-11-25 01:04:03 -08004022 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00004023 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08004024
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004025 /* enable stripping of CRC. It's unlikely this will break BMC
Auke Kok87cb7e82008-07-08 15:08:29 -07004026 * redirection as it did with e1000. Newer features require
4027 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004028 */
Auke Kok87cb7e82008-07-08 15:08:29 -07004029 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004030
Alexander Duyck559e9c42009-10-27 23:52:50 +00004031 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08004032 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08004033
Alexander Duyck45693bc2016-01-06 23:10:39 -08004034 /* enable LPE to allow for reception of jumbo frames */
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00004035 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08004036
Alexander Duyck952f72a2009-10-27 15:51:07 +00004037 /* disable queue 0 to prevent tail write w/o re-config */
4038 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08004039
Alexander Duycke1739522009-02-19 20:39:44 -08004040 /* Attention!!! For SR-IOV PF driver operations you must enable
4041 * queue drop for all VF and PF queues to prevent head of line blocking
4042	 * if an untrusted VF does not provide descriptors to hardware.
4043 */
4044 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08004045 /* set all queue drop enable bits */
4046 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08004047 }
4048
Ben Greear89eaefb2012-03-06 09:41:58 +00004049 /* This is useful for sniffing bad packets. */
4050 if (adapter->netdev->features & NETIF_F_RXALL) {
4051 /* UPE and MPE will be handled by normal PROMISC logic
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004052	 * in igb_set_rx_mode
4053 */
Ben Greear89eaefb2012-03-06 09:41:58 +00004054 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
4055 E1000_RCTL_BAM | /* RX All Bcast Pkts */
4056 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
4057
Alexander Duyck16903ca2016-01-06 23:11:18 -08004058 rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
Ben Greear89eaefb2012-03-06 09:41:58 +00004059 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
4060 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
4061 * and that breaks VLANs.
4062 */
4063 }
4064
Auke Kok9d5c8242008-01-24 02:22:38 -08004065 wr32(E1000_RCTL, rctl);
4066}
4067
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004068static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
Carolyn Wyborny9005df32014-04-11 01:45:34 +00004069 int vfn)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004070{
4071 struct e1000_hw *hw = &adapter->hw;
4072 u32 vmolr;
4073
Alexander Duyckd3836f82016-01-06 23:10:47 -08004074 if (size > MAX_JUMBO_FRAME_SIZE)
4075 size = MAX_JUMBO_FRAME_SIZE;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004076
4077 vmolr = rd32(E1000_VMOLR(vfn));
4078 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4079 vmolr |= size | E1000_VMOLR_LPE;
4080 wr32(E1000_VMOLR(vfn), vmolr);
4081
4082 return 0;
4083}
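
/* Example: a VF requesting size = 16384 is clamped to
 * MAX_JUMBO_FRAME_SIZE before the value reaches the VMOLR RLPML field,
 * so no pool can be configured to accept frames the MAC cannot handle.
 */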
4084
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004085static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4086 int vfn, bool enable)
Alexander Duycke1739522009-02-19 20:39:44 -08004087{
Alexander Duycke1739522009-02-19 20:39:44 -08004088 struct e1000_hw *hw = &adapter->hw;
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004089 u32 val, reg;
Alexander Duycke1739522009-02-19 20:39:44 -08004090
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004091 if (hw->mac.type < e1000_82576)
4092 return;
Alexander Duycke1739522009-02-19 20:39:44 -08004093
Corinna Vinschen030f9f52016-01-28 13:53:23 +01004094 if (hw->mac.type == e1000_i350)
4095 reg = E1000_DVMOLR(vfn);
4096 else
4097 reg = E1000_VMOLR(vfn);
4098
4099 val = rd32(reg);
4100 if (enable)
4101 val |= E1000_VMOLR_STRVLAN;
4102 else
4103 val &= ~(E1000_VMOLR_STRVLAN);
4104 wr32(reg, val);
Alexander Duycke1739522009-02-19 20:39:44 -08004105}
4106
Williams, Mitch A8151d292010-02-10 01:44:24 +00004107static inline void igb_set_vmolr(struct igb_adapter *adapter,
4108 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004109{
4110 struct e1000_hw *hw = &adapter->hw;
4111 u32 vmolr;
4112
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004113	/* This register exists only on 82576 and newer, so exit and do
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004114	 * nothing on older MACs
4115 */
4116 if (hw->mac.type < e1000_82576)
4117 return;
4118
4119 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00004120 if (aupe)
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004121 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004122 else
4123 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004124
4125 /* clear all bits that might not be set */
4126 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4127
Alexander Duycka99955f2009-11-12 18:37:19 +00004128 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004129 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004130 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004131 * multicast packets
4132 */
4133 if (vfn <= adapter->vfs_allocated_count)
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004134 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004135
4136 wr32(E1000_VMOLR(vfn), vmolr);
4137}
4138
Alexander Duycke1739522009-02-19 20:39:44 -08004139/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004140 * igb_configure_rx_ring - Configure a receive ring after Reset
4141 * @adapter: board private structure
4142 * @ring: receive ring to be configured
Alexander Duyck85b430b2009-10-27 15:50:29 +00004143 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004144 * Configure the Rx unit of the MAC after a reset.
Alexander Duyck85b430b2009-10-27 15:50:29 +00004145 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00004146void igb_configure_rx_ring(struct igb_adapter *adapter,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004147 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00004148{
4149 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck7ec01162017-02-06 18:25:41 -08004150 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck85b430b2009-10-27 15:50:29 +00004151 u64 rdba = ring->dma;
4152 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00004153 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00004154
4155 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00004156 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00004157
4158 /* Set DMA base address registers */
4159 wr32(E1000_RDBAL(reg_idx),
4160 rdba & 0x00000000ffffffffULL);
4161 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4162 wr32(E1000_RDLEN(reg_idx),
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004163 ring->count * sizeof(union e1000_adv_rx_desc));
Alexander Duyck85b430b2009-10-27 15:50:29 +00004164
4165 /* initialize head and tail */
Cao jin629823b2016-11-08 15:06:20 +08004166 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00004167 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00004168 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00004169
Alexander Duyck952f72a2009-10-27 15:51:07 +00004170 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00004171 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck8649aae2017-02-06 18:27:03 -08004172 if (ring_uses_large_buffer(ring))
4173 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4174 else
4175 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00004176 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
Alexander Duyck06218a82011-08-26 07:46:55 +00004177 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00004178 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00004179 /* Only set Drop Enable if we are supporting multiple queues */
4180 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4181 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00004182
4183 wr32(E1000_SRRCTL(reg_idx), srrctl);
4184
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004185 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00004186 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004187
Alexander Duyck85b430b2009-10-27 15:50:29 +00004188 rxdctl |= IGB_RX_PTHRESH;
4189 rxdctl |= IGB_RX_HTHRESH << 8;
4190 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00004191
Alexander Duyckd2bead52017-02-06 18:25:50 -08004192 /* initialize rx_buffer_info */
4193 memset(ring->rx_buffer_info, 0,
4194 sizeof(struct igb_rx_buffer) * ring->count);
4195
Alexander Duyck7ec01162017-02-06 18:25:41 -08004196 /* initialize Rx descriptor 0 */
4197 rx_desc = IGB_RX_DESC(ring, 0);
4198 rx_desc->wb.upper.length = 0;
4199
Alexander Duycka74420e2011-08-26 07:43:27 +00004200 /* enable receive descriptor fetching */
4201 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00004202 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4203}
4204
Alexander Duyck8649aae2017-02-06 18:27:03 -08004205static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4206 struct igb_ring *rx_ring)
4207{
4208 /* set build_skb and buffer size flags */
Alexander Duycke3cdf682017-02-06 18:27:14 -08004209 clear_ring_build_skb_enabled(rx_ring);
Alexander Duyck8649aae2017-02-06 18:27:03 -08004210 clear_ring_uses_large_buffer(rx_ring);
4211
4212 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4213 return;
4214
Alexander Duycke3cdf682017-02-06 18:27:14 -08004215 set_ring_build_skb_enabled(rx_ring);
4216
Alexander Duyck8649aae2017-02-06 18:27:03 -08004217#if (PAGE_SIZE < 8192)
4218 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4219 return;
4220
4221 set_ring_uses_large_buffer(rx_ring);
4222#endif
4223}
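
/* On 4K-page systems the selection above means half-page (2048 byte)
 * buffers with build_skb() by default, switching to 3072 byte "large"
 * buffers only once max_frame_size exceeds IGB_MAX_FRAME_BUILD_SKB;
 * the matching page size is then supplied by igb_rx_pg_size() when the
 * ring is filled.
 */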
4224
Alexander Duyck85b430b2009-10-27 15:50:29 +00004225/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004226 * igb_configure_rx - Configure receive Unit after Reset
4227 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004228 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004229 * Configure the Rx unit of the MAC after a reset.
Auke Kok9d5c8242008-01-24 02:22:38 -08004230 **/
4231static void igb_configure_rx(struct igb_adapter *adapter)
4232{
Hannes Eder91075842009-02-18 19:36:04 -08004233 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004234
Alexander Duyck26ad9172009-10-05 06:32:49 +00004235 /* set the correct pool for the PF default MAC address in entry 0 */
Yury Kylulin83c21332017-03-07 11:20:25 +03004236 igb_set_default_mac_filter(adapter);
Alexander Duyck26ad9172009-10-05 06:32:49 +00004237
Alexander Duyck06cf2662009-10-27 15:53:25 +00004238 /* Setup the HW Rx Head and Tail Descriptor Pointers and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004239 * the Base and Length of the Rx Descriptor Ring
4240 */
Alexander Duyck8649aae2017-02-06 18:27:03 -08004241 for (i = 0; i < adapter->num_rx_queues; i++) {
4242 struct igb_ring *rx_ring = adapter->rx_ring[i];
4243
4244 igb_set_rx_buffer_len(adapter, rx_ring);
4245 igb_configure_rx_ring(adapter, rx_ring);
4246 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004247}
4248
4249/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004250 * igb_free_tx_resources - Free Tx Resources per Queue
4251 * @tx_ring: Tx descriptor ring for a specific queue
Auke Kok9d5c8242008-01-24 02:22:38 -08004252 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004253 * Free all transmit software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004254 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08004255void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004256{
Mitch Williams3b644cf2008-06-27 10:59:48 -07004257 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08004258
Alexander Duyck06034642011-08-26 07:44:22 +00004259 vfree(tx_ring->tx_buffer_info);
4260 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08004261
Alexander Duyck439705e2009-10-27 23:49:20 +00004262 /* if not set, then don't free */
4263 if (!tx_ring->desc)
4264 return;
4265
Alexander Duyck59d71982010-04-27 13:09:25 +00004266 dma_free_coherent(tx_ring->dev, tx_ring->size,
4267 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08004268
4269 tx_ring->desc = NULL;
4270}
4271
4272/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004273 * igb_free_all_tx_resources - Free Tx Resources for All Queues
4274 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004275 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004276 * Free all transmit software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004277 **/
4278static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4279{
4280 int i;
4281
4282 for (i = 0; i < adapter->num_tx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004283 if (adapter->tx_ring[i])
4284 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004285}
4286
Auke Kok9d5c8242008-01-24 02:22:38 -08004287/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004288 * igb_clean_tx_ring - Free Tx Buffers
4289 * @tx_ring: ring to be cleaned
Auke Kok9d5c8242008-01-24 02:22:38 -08004290 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07004291static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004292{
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004293 u16 i = tx_ring->next_to_clean;
4294 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08004295
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004296 while (i != tx_ring->next_to_use) {
4297 union e1000_adv_tx_desc *eop_desc, *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004298
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004299 /* Free all the Tx ring sk_buffs */
4300 dev_kfree_skb_any(tx_buffer->skb);
4301
4302 /* unmap skb header data */
4303 dma_unmap_single(tx_ring->dev,
4304 dma_unmap_addr(tx_buffer, dma),
4305 dma_unmap_len(tx_buffer, len),
4306 DMA_TO_DEVICE);
4307
4308 /* check for eop_desc to determine the end of the packet */
4309 eop_desc = tx_buffer->next_to_watch;
4310 tx_desc = IGB_TX_DESC(tx_ring, i);
4311
4312 /* unmap remaining buffers */
4313 while (tx_desc != eop_desc) {
4314 tx_buffer++;
4315 tx_desc++;
4316 i++;
4317 if (unlikely(i == tx_ring->count)) {
4318 i = 0;
4319 tx_buffer = tx_ring->tx_buffer_info;
4320 tx_desc = IGB_TX_DESC(tx_ring, 0);
4321 }
4322
4323 /* unmap any remaining paged data */
4324 if (dma_unmap_len(tx_buffer, len))
4325 dma_unmap_page(tx_ring->dev,
4326 dma_unmap_addr(tx_buffer, dma),
4327 dma_unmap_len(tx_buffer, len),
4328 DMA_TO_DEVICE);
4329 }
4330
4331 /* move us one more past the eop_desc for start of next pkt */
4332 tx_buffer++;
4333 i++;
4334 if (unlikely(i == tx_ring->count)) {
4335 i = 0;
4336 tx_buffer = tx_ring->tx_buffer_info;
4337 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004338 }
4339
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004340 /* reset BQL for queue */
John Fastabenddad8a3b2012-04-23 12:22:39 +00004341 netdev_tx_reset_queue(txring_txq(tx_ring));
4342
Alexander Duyck7cc6fd42017-02-06 18:26:02 -08004343 /* reset next_to_use and next_to_clean */
Auke Kok9d5c8242008-01-24 02:22:38 -08004344 tx_ring->next_to_use = 0;
4345 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004346}
4347
4348/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004349 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
4350 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004351 **/
4352static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4353{
4354 int i;
4355
4356 for (i = 0; i < adapter->num_tx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004357 if (adapter->tx_ring[i])
4358 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004359}
4360
4361/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004362 * igb_free_rx_resources - Free Rx Resources
4363 * @rx_ring: ring to clean the resources from
Auke Kok9d5c8242008-01-24 02:22:38 -08004364 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004365 * Free all receive software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004366 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08004367void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004368{
Mitch Williams3b644cf2008-06-27 10:59:48 -07004369 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08004370
Alexander Duyck06034642011-08-26 07:44:22 +00004371 vfree(rx_ring->rx_buffer_info);
4372 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08004373
Alexander Duyck439705e2009-10-27 23:49:20 +00004374 /* if not set, then don't free */
4375 if (!rx_ring->desc)
4376 return;
4377
Alexander Duyck59d71982010-04-27 13:09:25 +00004378 dma_free_coherent(rx_ring->dev, rx_ring->size,
4379 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08004380
4381 rx_ring->desc = NULL;
4382}
4383
4384/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004385 * igb_free_all_rx_resources - Free Rx Resources for All Queues
4386 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004387 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004388 * Free all receive software resources
Auke Kok9d5c8242008-01-24 02:22:38 -08004389 **/
4390static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4391{
4392 int i;
4393
4394 for (i = 0; i < adapter->num_rx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004395 if (adapter->rx_ring[i])
4396 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004397}
4398
4399/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004400 * igb_clean_rx_ring - Free Rx Buffers per Queue
4401 * @rx_ring: ring to free buffers from
Auke Kok9d5c8242008-01-24 02:22:38 -08004402 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07004403static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004404{
Alexander Duyckd2bead52017-02-06 18:25:50 -08004405 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08004406
Alexander Duyck1a1c2252012-09-25 00:30:52 +00004407 if (rx_ring->skb)
4408 dev_kfree_skb(rx_ring->skb);
4409 rx_ring->skb = NULL;
4410
Auke Kok9d5c8242008-01-24 02:22:38 -08004411 /* Free all the Rx ring sk_buffs */
Alexander Duyckd2bead52017-02-06 18:25:50 -08004412 while (i != rx_ring->next_to_alloc) {
Alexander Duyck06034642011-08-26 07:44:22 +00004413 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08004414
Alexander Duyck5be59552016-12-14 15:05:30 -08004415 /* Invalidate cache lines that may have been written to by
4416 * device so that we avoid corrupting memory.
4417 */
4418 dma_sync_single_range_for_cpu(rx_ring->dev,
4419 buffer_info->dma,
4420 buffer_info->page_offset,
Alexander Duyck8649aae2017-02-06 18:27:03 -08004421 igb_rx_bufsz(rx_ring),
Alexander Duyck5be59552016-12-14 15:05:30 -08004422 DMA_FROM_DEVICE);
4423
4424 /* free resources associated with mapping */
4425 dma_unmap_page_attrs(rx_ring->dev,
4426 buffer_info->dma,
Alexander Duyck8649aae2017-02-06 18:27:03 -08004427 igb_rx_pg_size(rx_ring),
Alexander Duyck5be59552016-12-14 15:05:30 -08004428 DMA_FROM_DEVICE,
Alexander Duyck7bd17592017-02-06 18:25:26 -08004429 IGB_RX_DMA_ATTR);
Alexander Duyck2976db82017-01-10 16:58:09 -08004430 __page_frag_cache_drain(buffer_info->page,
4431 buffer_info->pagecnt_bias);
Alexander Duyckcbc8e552012-09-25 00:31:02 +00004432
Alexander Duyckd2bead52017-02-06 18:25:50 -08004433 i++;
4434 if (i == rx_ring->count)
4435 i = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004436 }
4437
Alexander Duyckcbc8e552012-09-25 00:31:02 +00004438 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004439 rx_ring->next_to_clean = 0;
4440 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004441}
4442
4443/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004444 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
4445 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004446 **/
4447static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4448{
4449 int i;
4450
4451 for (i = 0; i < adapter->num_rx_queues; i++)
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08004452 if (adapter->rx_ring[i])
4453 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08004454}
4455
4456/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004457 * igb_set_mac - Change the Ethernet Address of the NIC
4458 * @netdev: network interface device structure
4459 * @p: pointer to an address structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004460 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004461 * Returns 0 on success, negative on failure
Auke Kok9d5c8242008-01-24 02:22:38 -08004462 **/
4463static int igb_set_mac(struct net_device *netdev, void *p)
4464{
4465 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00004466 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08004467 struct sockaddr *addr = p;
4468
4469 if (!is_valid_ether_addr(addr->sa_data))
4470 return -EADDRNOTAVAIL;
4471
4472 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00004473 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08004474
Alexander Duyck26ad9172009-10-05 06:32:49 +00004475 /* set the correct pool for the new PF MAC address in entry 0 */
Yury Kylulin83c21332017-03-07 11:20:25 +03004476 igb_set_default_mac_filter(adapter);
Alexander Duycke1739522009-02-19 20:39:44 -08004477
Auke Kok9d5c8242008-01-24 02:22:38 -08004478 return 0;
4479}
4480
4481/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004482 * igb_write_mc_addr_list - write multicast addresses to MTA
4483 * @netdev: network interface device structure
Alexander Duyck68d480c2009-10-05 06:33:08 +00004484 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004485 * Writes multicast address list to the MTA hash table.
4486 * Returns: -ENOMEM on failure
4487 * 0 on no addresses written
4488 * X on writing X addresses to MTA
Alexander Duyck68d480c2009-10-05 06:33:08 +00004489 **/
4490static int igb_write_mc_addr_list(struct net_device *netdev)
4491{
4492 struct igb_adapter *adapter = netdev_priv(netdev);
4493 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004494 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004495 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004496 int i;
4497
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004498 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00004499 /* nothing to program, so clear mc list */
4500 igb_update_mc_addr_list(hw, NULL, 0);
4501 igb_restore_vf_multicasts(adapter);
4502 return 0;
4503 }
4504
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004505	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00004506 if (!mta_list)
4507 return -ENOMEM;
4508
Alexander Duyck68d480c2009-10-05 06:33:08 +00004509 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00004510 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004511 netdev_for_each_mc_addr(ha, netdev)
4512 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00004513
Alexander Duyck68d480c2009-10-05 06:33:08 +00004514 igb_update_mc_addr_list(hw, mta_list, i);
4515 kfree(mta_list);
4516
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004517 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00004518}
4519
Alexander Duyck16903ca2016-01-06 23:11:18 -08004520static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4521{
4522 struct e1000_hw *hw = &adapter->hw;
4523 u32 i, pf_id;
4524
4525 switch (hw->mac.type) {
4526 case e1000_i210:
4527 case e1000_i211:
4528 case e1000_i350:
4529 /* VLAN filtering needed for VLAN prio filter */
4530 if (adapter->netdev->features & NETIF_F_NTUPLE)
4531 break;
4532 /* fall through */
4533 case e1000_82576:
4534 case e1000_82580:
4535 case e1000_i354:
4536 /* VLAN filtering needed for pool filtering */
4537 if (adapter->vfs_allocated_count)
4538 break;
4539 /* fall through */
4540 default:
4541 return 1;
4542 }
4543
4544 /* We are already in VLAN promisc, nothing to do */
4545 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4546 return 0;
4547
4548 if (!adapter->vfs_allocated_count)
4549 goto set_vfta;
4550
4551 /* Add PF to all active pools */
4552 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4553
4554 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4555 u32 vlvf = rd32(E1000_VLVF(i));
4556
Jacob Kellera51d8c22016-04-13 16:08:28 -07004557 vlvf |= BIT(pf_id);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004558 wr32(E1000_VLVF(i), vlvf);
4559 }
4560
4561set_vfta:
4562 /* Set all bits in the VLAN filter table array */
4563 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4564 hw->mac.ops.write_vfta(hw, i, ~0U);
4565
4566 /* Set flag so we don't redo unnecessary work */
4567 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4568
4569 return 0;
4570}
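
/* Worked example of the pool bookkeeping above, assuming
 * E1000_VLVF_POOLSEL_SHIFT == 12: with seven VFs allocated the PF owns
 * pool 7, so
 *
 *	pf_id = 7 + 12;		// bit 19 of the VLVF register
 *	vlvf |= BIT(pf_id);	// add pool 7 to the entry's pool set
 *
 * which marks the PF as a member of every active VLVF filter entry.
 */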
4571
4572#define VFTA_BLOCK_SIZE 8
4573static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4574{
4575 struct e1000_hw *hw = &adapter->hw;
4576 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4577 u32 vid_start = vfta_offset * 32;
4578 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4579 u32 i, vid, word, bits, pf_id;
4580
4581 /* guarantee that we don't scrub out management VLAN */
4582 vid = adapter->mng_vlan_id;
4583 if (vid >= vid_start && vid < vid_end)
Jacob Kellera51d8c22016-04-13 16:08:28 -07004584 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004585
4586 if (!adapter->vfs_allocated_count)
4587 goto set_vfta;
4588
4589 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4590
4591 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4592 u32 vlvf = rd32(E1000_VLVF(i));
4593
4594 /* pull VLAN ID from VLVF */
4595 vid = vlvf & VLAN_VID_MASK;
4596
4597		/* only concern ourselves with VLANs in the block being scrubbed */
4598 if (vid < vid_start || vid >= vid_end)
4599 continue;
4600
4601 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4602 /* record VLAN ID in VFTA */
Jacob Kellera51d8c22016-04-13 16:08:28 -07004603 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004604
4605 /* if PF is part of this then continue */
4606 if (test_bit(vid, adapter->active_vlans))
4607 continue;
4608 }
4609
4610 /* remove PF from the pool */
Jacob Kellera51d8c22016-04-13 16:08:28 -07004611 bits = ~BIT(pf_id);
Alexander Duyck16903ca2016-01-06 23:11:18 -08004612 bits &= rd32(E1000_VLVF(i));
4613 wr32(E1000_VLVF(i), bits);
4614 }
4615
4616set_vfta:
4617 /* extract values from active_vlans and write back to VFTA */
4618 for (i = VFTA_BLOCK_SIZE; i--;) {
4619 vid = (vfta_offset + i) * 32;
4620 word = vid / BITS_PER_LONG;
4621 bits = vid % BITS_PER_LONG;
4622
4623 vfta[i] |= adapter->active_vlans[word] >> bits;
4624
4625 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4626 }
4627}
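
/* VFTA indexing example: each 32-bit VFTA word covers 32 VLAN IDs, so
 * for vid = 100 inside the block starting at vid_start = 96 (that is,
 * vfta_offset == 3):
 *
 *	vfta[(100 - 96) / 32] |= BIT(100 % 32);	// vfta[0], bit 4
 */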
4628
4629static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4630{
4631 u32 i;
4632
4633 /* We are not in VLAN promisc, nothing to do */
4634 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4635 return;
4636
4637 /* Set flag so we don't redo unnecessary work */
4638 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4639
4640 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4641 igb_scrub_vfta(adapter, i);
4642}
4643
Alexander Duyck68d480c2009-10-05 06:33:08 +00004644/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004645 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4646 * @netdev: network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08004647 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004648 * The set_rx_mode entry point is called whenever the unicast or multicast
4649 * address lists or the network interface flags are updated. This routine is
4650 * responsible for configuring the hardware for proper unicast, multicast,
4651 * promiscuous mode, and all-multi behavior.
Auke Kok9d5c8242008-01-24 02:22:38 -08004652 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004653static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004654{
4655 struct igb_adapter *adapter = netdev_priv(netdev);
4656 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004657 unsigned int vfn = adapter->vfs_allocated_count;
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004658 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004659 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08004660
4661 /* Check for Promiscuous and All Multicast modes */
Patrick McHardy746b9f02008-07-16 20:15:45 -07004662 if (netdev->flags & IFF_PROMISC) {
Alexander Duyck16903ca2016-01-06 23:11:18 -08004663 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
Alexander Duyckbf456ab2016-01-06 23:11:43 -08004664 vmolr |= E1000_VMOLR_MPME;
4665
4666 /* enable use of UTA filter to force packets to default pool */
4667 if (hw->mac.type == e1000_82576)
4668 vmolr |= E1000_VMOLR_ROPE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07004669 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00004670 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07004671 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00004672 vmolr |= E1000_VMOLR_MPME;
4673 } else {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004674			/* Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004675 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00004676 * that we can at least receive multicast traffic
4677 */
4678 count = igb_write_mc_addr_list(netdev);
4679 if (count < 0) {
4680 rctl |= E1000_RCTL_MPE;
4681 vmolr |= E1000_VMOLR_MPME;
4682 } else if (count) {
4683 vmolr |= E1000_VMOLR_ROMPE;
4684 }
4685 }
Patrick McHardy746b9f02008-07-16 20:15:45 -07004686 }
Alexander Duyck268f9d32016-01-06 23:11:34 -08004687
4688 /* Write addresses to available RAR registers, if there is not
4689 * sufficient space to store all the addresses then enable
4690 * unicast promiscuous mode
4691 */
Yury Kylulin83c21332017-03-07 11:20:25 +03004692 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
Alexander Duyck268f9d32016-01-06 23:11:34 -08004693 rctl |= E1000_RCTL_UPE;
4694 vmolr |= E1000_VMOLR_ROPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08004695 }
Alexander Duyck16903ca2016-01-06 23:11:18 -08004696
4697 /* enable VLAN filtering by default */
4698 rctl |= E1000_RCTL_VFE;
4699
4700 /* disable VLAN filtering for modes that require it */
4701 if ((netdev->flags & IFF_PROMISC) ||
4702 (netdev->features & NETIF_F_RXALL)) {
4703 /* if we fail to set all rules then just clear VFE */
4704 if (igb_vlan_promisc_enable(adapter))
4705 rctl &= ~E1000_RCTL_VFE;
4706 } else {
4707 igb_vlan_promisc_disable(adapter);
4708 }
4709
4710 /* update state of unicast, multicast, and VLAN filtering modes */
4711 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
4712 E1000_RCTL_VFE);
Auke Kok9d5c8242008-01-24 02:22:38 -08004713 wr32(E1000_RCTL, rctl);
4714
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004715#if (PAGE_SIZE < 8192)
4716 if (!adapter->vfs_allocated_count) {
4717 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4718 rlpml = IGB_MAX_FRAME_BUILD_SKB;
4719 }
4720#endif
4721 wr32(E1000_RLPML, rlpml);
4722
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004723 /* In order to support SR-IOV and eventually VMDq it is necessary to set
Alexander Duyck68d480c2009-10-05 06:33:08 +00004724 * the VMOLR to enable the appropriate modes. Without this workaround
4725 * we will have issues with VLAN tag stripping not being done for frames
4726 * that are only arriving because we are the default pool
4727 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00004728 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
Alexander Duyck28fc06f2009-07-23 18:08:54 +00004729 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00004730
Alexander Duyckbf456ab2016-01-06 23:11:43 -08004731 /* set UTA to appropriate mode */
4732 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
4733
Alexander Duyck68d480c2009-10-05 06:33:08 +00004734 vmolr |= rd32(E1000_VMOLR(vfn)) &
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004735 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
Alexander Duyck45693bc2016-01-06 23:10:39 -08004736
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004737 /* enable Rx jumbo frames, restrict as needed to support build_skb */
Alexander Duyck45693bc2016-01-06 23:10:39 -08004738 vmolr &= ~E1000_VMOLR_RLPML_MASK;
Alexander Duyckcfbc8712017-02-06 18:26:15 -08004739#if (PAGE_SIZE < 8192)
4740 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4741 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
4742 else
4743#endif
4744 vmolr |= MAX_JUMBO_FRAME_SIZE;
4745 vmolr |= E1000_VMOLR_LPE;
Alexander Duyck45693bc2016-01-06 23:10:39 -08004746
Alexander Duyck68d480c2009-10-05 06:33:08 +00004747 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck45693bc2016-01-06 23:10:39 -08004748
Alexander Duyck28fc06f2009-07-23 18:08:54 +00004749 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004750}
4751
Greg Rose13800462010-11-06 02:08:26 +00004752static void igb_check_wvbr(struct igb_adapter *adapter)
4753{
4754 struct e1000_hw *hw = &adapter->hw;
4755 u32 wvbr = 0;
4756
4757 switch (hw->mac.type) {
4758 case e1000_82576:
4759 case e1000_i350:
Carolyn Wyborny81ad8072014-04-11 01:46:13 +00004760 wvbr = rd32(E1000_WVBR);
4761 if (!wvbr)
Greg Rose13800462010-11-06 02:08:26 +00004762 return;
4763 break;
4764 default:
4765 break;
4766 }
4767
4768 adapter->wvbr |= wvbr;
4769}
4770
4771#define IGB_STAGGERED_QUEUE_OFFSET 8
4772
4773static void igb_spoof_check(struct igb_adapter *adapter)
4774{
4775 int j;
4776
4777 if (!adapter->wvbr)
4778 return;
4779
Carolyn Wyborny9005df32014-04-11 01:45:34 +00004780 for (j = 0; j < adapter->vfs_allocated_count; j++) {
Jacob Kellera51d8c22016-04-13 16:08:28 -07004781 if (adapter->wvbr & BIT(j) ||
4782 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
Greg Rose13800462010-11-06 02:08:26 +00004783 dev_warn(&adapter->pdev->dev,
4784 "Spoof event(s) detected on VF %d\n", j);
4785 adapter->wvbr &=
Jacob Kellera51d8c22016-04-13 16:08:28 -07004786 ~(BIT(j) |
4787 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
Greg Rose13800462010-11-06 02:08:26 +00004788 }
4789 }
4790}
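
/* The WVBR register reports spoof events in two banks of bits staggered
 * by IGB_STAGGERED_QUEUE_OFFSET (8); VF 2, for instance, is tested via
 * BIT(2) | BIT(10), and both bits are cleared together once reported.
 */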
4791
Auke Kok9d5c8242008-01-24 02:22:38 -08004792/* Need to wait a few seconds after link up to get diagnostic information from
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004793 * the phy
4794 */
Kees Cook26566ea2017-10-16 17:29:35 -07004795static void igb_update_phy_info(struct timer_list *t)
Auke Kok9d5c8242008-01-24 02:22:38 -08004796{
Kees Cook26566ea2017-10-16 17:29:35 -07004797 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08004798 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08004799}
4800
4801/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004802 * igb_has_link - check shared code for link and determine up/down
4803 * @adapter: pointer to driver private info
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004804 **/
Nick Nunley31455352010-02-17 01:01:21 +00004805bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004806{
4807 struct e1000_hw *hw = &adapter->hw;
4808 bool link_active = false;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004809
4810 /* get_link_status is set on LSC (link status) interrupt or
4811 * rx sequence error interrupt. get_link_status will stay
4812 * false until the e1000_check_for_link establishes link
4813 * for copper adapters ONLY
4814 */
4815 switch (hw->phy.media_type) {
4816 case e1000_media_type_copper:
Akeem G Abodunrine5c33702013-06-06 01:31:09 +00004817 if (!hw->mac.get_link_status)
4818 return true;
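		/* fall through */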
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004819 case e1000_media_type_internal_serdes:
Akeem G Abodunrine5c33702013-06-06 01:31:09 +00004820 hw->mac.ops.check_for_link(hw);
4821 link_active = !hw->mac.get_link_status;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004822 break;
4823 default:
4824 case e1000_media_type_unknown:
4825 break;
4826 }
4827
Akeem G Abodunrinaa9b8cc2013-08-28 02:22:43 +00004828 if (((hw->mac.type == e1000_i210) ||
4829 (hw->mac.type == e1000_i211)) &&
4830 (hw->phy.id == I210_I_PHY_ID)) {
4831 if (!netif_carrier_ok(adapter->netdev)) {
4832 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4833 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
4834 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
4835 adapter->link_check_timeout = jiffies;
4836 }
4837 }
4838
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004839 return link_active;
4840}
4841
Stefan Assmann563988d2011-04-05 04:27:15 +00004842static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
4843{
4844 bool ret = false;
4845 u32 ctrl_ext, thstat;
4846
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00004847 /* check for thermal sensor event on i350 copper only */
Stefan Assmann563988d2011-04-05 04:27:15 +00004848 if (hw->mac.type == e1000_i350) {
4849 thstat = rd32(E1000_THSTAT);
4850 ctrl_ext = rd32(E1000_CTRL_EXT);
4851
4852 if ((hw->phy.media_type == e1000_media_type_copper) &&
Akeem G. Abodunrin5c17a202013-01-29 10:15:31 +00004853 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
Stefan Assmann563988d2011-04-05 04:27:15 +00004854 ret = !!(thstat & event);
Stefan Assmann563988d2011-04-05 04:27:15 +00004855 }
4856
4857 return ret;
4858}
4859
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004860/**
Carolyn Wyborny1516f0a2014-07-09 04:55:45 +00004861 * igb_check_lvmmc - check for malformed packets received
4862 * and indicated in LVMMC register
4863 * @adapter: pointer to adapter
4864 **/
4865static void igb_check_lvmmc(struct igb_adapter *adapter)
4866{
4867 struct e1000_hw *hw = &adapter->hw;
4868 u32 lvmmc;
4869
4870 lvmmc = rd32(E1000_LVMMC);
4871 if (lvmmc) {
4872 if (unlikely(net_ratelimit())) {
4873 netdev_warn(adapter->netdev,
4874 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
4875 lvmmc);
4876 }
4877 }
4878}
4879
4880/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004881 * igb_watchdog - Timer Call-back
4882	 * @t: pointer to the timer_list from which the adapter is recovered
Auke Kok9d5c8242008-01-24 02:22:38 -08004883 **/
Kees Cook26566ea2017-10-16 17:29:35 -07004884static void igb_watchdog(struct timer_list *t)
Auke Kok9d5c8242008-01-24 02:22:38 -08004885{
Kees Cook26566ea2017-10-16 17:29:35 -07004886 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
Auke Kok9d5c8242008-01-24 02:22:38 -08004887 /* Do the rest outside of interrupt context */
4888 schedule_work(&adapter->watchdog_task);
4889}
4890
4891static void igb_watchdog_task(struct work_struct *work)
4892{
4893 struct igb_adapter *adapter = container_of(work,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00004894 struct igb_adapter,
4895 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08004896 struct e1000_hw *hw = &adapter->hw;
Koki Sanagic0ba4772013-01-16 11:05:53 +00004897 struct e1000_phy_info *phy = &hw->phy;
Auke Kok9d5c8242008-01-24 02:22:38 -08004898 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00004899 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07004900 int i;
Carolyn Wyborny56cec242013-10-17 05:36:26 +00004901 u32 connsw;
Takuma Uebab72f3f72015-12-31 14:58:14 +09004902 u16 phy_data, retry_count = 20;
Auke Kok9d5c8242008-01-24 02:22:38 -08004903
Alexander Duyck4d6b7252009-02-06 23:16:24 +00004904 link = igb_has_link(adapter);
Akeem G Abodunrinaa9b8cc2013-08-28 02:22:43 +00004905
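	/* A minimal reading of the flag dance below (not authoritative):
	 * igb_has_link() sets IGB_FLAG_NEED_LINK_UPDATE when the i210/i211
	 * internal PHY reports link while the carrier is already up, which
	 * can indicate a link flap.  The watchdog then holds off acting on
	 * link-up for one second (link_check_timeout + HZ) so a bouncing
	 * link is debounced before it is reported to the stack.
	 */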
	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
		else
			link = false;
	}

	/* Force link down if we have fiber to swap to */
	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
		if (hw->phy.media_type == e1000_media_type_copper) {
			connsw = rd32(E1000_CONNSW);
			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
				link = 0;
		}
	}
	if (link) {
		/* Perform a reset if the media type changed. */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = false;
			adapter->flags |= IGB_FLAG_MEDIA_RESET;
			igb_reset(adapter);
		}
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;

			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			netdev_info(netdev,
				    "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
				    netdev->name,
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half",
				    (ctrl & E1000_CTRL_TFCE) &&
				    (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
				    (ctrl & E1000_CTRL_RFCE) ?  "RX" :
				    (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");

			/* disable EEE if enabled */
			if ((adapter->flags & IGB_FLAG_EEE) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				dev_info(&adapter->pdev->dev,
					 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
				adapter->hw.dev_spec._82575.eee_disable = true;
				adapter->flags &= ~IGB_FLAG_EEE;
			}

			/* check if SmartSpeed worked */
			igb_check_downshift(hw);
			if (phy->speed_downgraded)
				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_LINK_THROTTLE))
				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

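			/* For gigabit links, poll for the link partner's
			 * "remote receiver OK" status before declaring the
			 * carrier up: up to 20 retries at 100 ms apiece,
			 * i.e. the 2 second maximum noted in the error path.
			 */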
			if (adapter->link_speed != SPEED_1000)
				goto no_wait;

			/* wait for Remote receiver status OK */
retry_read_status:
			if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
					      &phy_data)) {
				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
				    retry_count) {
					msleep(100);
					retry_count--;
					goto retry_read_status;
				} else if (!retry_count) {
					dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote receiver status\n");
				}
			} else {
				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
			}
no_wait:
			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
			}

			/* Links status message must follow this format */
			netdev_info(netdev, "igb: %s NIC Link is Down\n",
				    netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			/* link is down, time to check for alternate media */
			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
				igb_check_swap_media(adapter);
				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
					schedule_work(&adapter->reset_task);
					/* return immediately */
					return;
				}
			}
			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);

		/* also check for alternate media here */
		} else if (!netif_carrier_ok(netdev) &&
			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
			igb_check_swap_media(adapter);
			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
				schedule_work(&adapter->reset_task);
				/* return immediately */
				return;
			}
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 eics = 0;

		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);
	igb_ptp_rx_hang(adapter);
	igb_ptp_tx_hang(adapter);

	/* Check LVMMC register on i350/i354 only */
	if ((adapter->hw.mac.type == e1000_i350) ||
	    (adapter->hw.mac.type == e1000_i354))
		igb_check_lvmmc(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + HZ));
		else
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 2 * HZ));
	}
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

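/* Illustrative numbers for igb_update_ring_itr() below (a sketch derived
 * from the arithmetic in the function, not from any datasheet): bulk
 * 1500-byte frames give avg_wire_size = 1524 after the 24-byte
 * CRC/preamble/gap adjustment, so new_val = 1524 / 2 = 762 (a low
 * interrupt rate); 600-byte frames land in the 300..1200 "boost" band,
 * so new_val = 624 / 3 = 208.  Small results are clamped up to
 * IGB_20K_ITR when the ring's itr_setting is 3 (conservative mode).
 */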
/**
 *  igb_update_ring_itr - update the dynamic ITR value based on packet size
 *  @q_vector: pointer to q_vector
 *
 *  Stores a new ITR value based strictly on packet size.  This
 *  algorithm is less sophisticated than that used in igb_update_itr,
 *  due to the difficulty of synchronizing statistics across multiple
 *  receive rings.  The divisors and thresholds used by this function
 *  were determined based on theoretical maximum wire speed and testing
 *  data, in order to minimize response time while increasing bulk
 *  throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE: This function is called only when operating in a multiqueue
 *  receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

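/* A worked example of the state machine in igb_update_itr() (illustrative
 * only): a q_vector sitting in low_latency that saw 40 packets totaling
 * 30000 bytes in the last interrupt has bytes > 10000, bytes/packets of
 * 750, and packets > 35, so it steps up to lowest_latency; had the same
 * bytes arrived in only 3 packets (bytes/packets = 10000 > 8000, a TSO
 * or jumbo pattern), it would step down to bulk_latency instead.
 */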
/**
 *  igb_update_itr - update the dynamic ITR value based on statistics
 *  @q_vector: pointer to q_vector
 *  @ring_container: ring info to update the itr for
 *
 *  Stores a new ITR value based on packets and byte
 *  counts during the last interrupt.  The advantage of per interrupt
 *  computation is faster updates and more accurate ITR for the current
 *  traffic pattern.  Constants in this function were computed
 *  based on theoretical maximum wire speed and thresholds were set based
 *  on testing data as well as attempting to minimize response time
 *  while increasing bulk throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE: These calculations are only valid when operating in a single-
 *  queue environment.
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				itrval = bulk_latency;
			else if ((packets > 35))
				itrval = lowest_latency;
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

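/* Layout of the vlan_macip_lens word handed to igb_tx_ctxtdesc(), as a
 * sketch inferred from the shifts and masks used by the callers below
 * (assuming E1000_ADVTXD_MACLEN_SHIFT is 9, as on other 82575-family
 * parts, and IGB_TX_FLAGS_VLAN_MASK covers the top 16 bits):
 *
 *   31            16 15       9 8         0
 *  +----------------+----------+-----------+
 *  |    VLAN tag    |  MACLEN  |   IPLEN   |
 *  +----------------+----------+-----------+
 */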
static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

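	/* Illustrative numbers (not from the datasheet): with a 14-byte MAC
	 * header, 20-byte IPv4 header and 20-byte TCP header, l4_offset is
	 * 34 and *hdr_len is 54; for a 7294-byte skb the 7240 bytes of
	 * payload are backed out of the TCP pseudo-header checksum above,
	 * since the hardware re-inserts per-segment lengths as it segments.
	 */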
	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     igb_ipv6_csum_is_sctp(skb))) {
			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGB_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

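/* IGB_SET_FLAG() relocates a single flag bit from _input into a different
 * bit position, picking a multiply or divide so the scale factor is a
 * compile-time constant.  With made-up constants for illustration: if
 * _flag is 0x02 and _result is 0x20, the macro compiles down to
 * ((u32)(_input & 0x02) * 0x10), i.e. a 4-bit left shift of that flag.
 */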
static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
		       E1000_ADVTXD_DCMD_DEXT |
		       E1000_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
				 (E1000_ADVTXD_DCMD_VLE));

	/* set segmentation bits for TSO */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
				 (E1000_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
				 (E1000_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_CSUM,
				      (E1000_TXD_POPTS_TXSM << 8));

	/* insert IPv4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_IPV4,
				      (E1000_TXD_POPTS_IXSM << 8));

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

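/* A brief sketch of the stop/wake protocol above (not authoritative): the
 * transmit path stops the subqueue when descriptors run low, then issues a
 * full barrier before re-reading igb_desc_unused() so the stop cannot be
 * reordered past the recheck.  This pairs with the Tx cleanup path, which
 * frees descriptors and wakes the subqueue, closing the race where cleanup
 * frees the last needed slots between the stop and the recheck.
 */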
static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

static int igb_tx_map(struct igb_ring *tx_ring,
		      struct igb_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGB_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}
	return 0;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		if (adapter->tstamp_config.tx_type & HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGB_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
			if (adapter->hw.mac.type == e1000_82576)
				schedule_work(&adapter->ptp_tx_work);
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	skb_tx_timestamp(skb);

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	if (igb_tx_map(tx_ring, first, hdr_len))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		if (adapter->hw.mac.type == e1000_82576)
			cancel_work_sync(&adapter->ptp_tx_work);
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
	}

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 *  igb_tx_timeout - Respond to a Tx Hang
 *  @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 *  igb_get_stats64 - Get System Network Statistics
 *  @netdev: network interface device structure
 *  @stats: rtnl_link_stats64 pointer
 **/
static void igb_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}

/**
 *  igb_change_mtu - Change the Maximum Transfer Unit
 *  @netdev: network interface device structure
 *  @new_mtu: new value for maximum frame size
 *
 *  Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

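	/* For the default 1500-byte MTU this works out to
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
	 * bytes of max frame, i.e. a standard VLAN-tagged Ethernet frame.
	 */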
	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 *  igb_update_stats - Update the board statistics counters
 *  @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;

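	/* Per-ring byte/packet counters are updated locklessly in the hot
	 * path, so they are sampled below with the u64_stats seqcount
	 * begin/retry loop; this keeps the 64-bit reads consistent even on
	 * 32-bit hosts, where such loads are not atomic.
	 */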
	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		u32 rqdpc = rd32(E1000_RQDPC(i));

		if (hw->mac.type >= e1000_i210)
			wr32(E1000_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;
			net_stats->rx_fifo_errors += rqdpc;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;
	rcu_read_unlock();

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);

		/* this stat has invalid values on i210/i211 */
		if ((hw->mac.type != e1000_i210) &&
		    (hw->mac.type != e1000_i211))
			adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
Arnd Bergmann40c9b072015-09-30 13:26:33 +02006111 struct timespec64 ts;
Richard Cochran720db4f2014-11-21 20:51:26 +00006112 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
Richard Cochran00c65572014-11-21 20:51:20 +00006113
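	/* each cause handled below sets its bit in 'ack'; the single TSICR
	 * write at the end acknowledges only the causes actually serviced.
	 */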
6114 if (tsicr & TSINTR_SYS_WRAP) {
6115 event.type = PTP_CLOCK_PPS;
6116 if (adapter->ptp_caps.pps)
6117 ptp_clock_event(adapter->ptp_clock, &event);
Richard Cochran00c65572014-11-21 20:51:20 +00006118 ack |= TSINTR_SYS_WRAP;
6119 }
Richard Cochran61d7f752014-11-21 20:51:10 +00006120
6121 if (tsicr & E1000_TSICR_TXTS) {
Richard Cochran61d7f752014-11-21 20:51:10 +00006122 /* retrieve hardware timestamp */
6123 schedule_work(&adapter->ptp_tx_work);
Richard Cochran00c65572014-11-21 20:51:20 +00006124 ack |= E1000_TSICR_TXTS;
Richard Cochran61d7f752014-11-21 20:51:10 +00006125 }
Richard Cochran00c65572014-11-21 20:51:20 +00006126
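	/* target-time interrupts re-arm themselves: advance the stored
	 * start time by one period, program the new target and re-enable
	 * the timer so the periodic output keeps running.
	 */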
Richard Cochran720db4f2014-11-21 20:51:26 +00006127 if (tsicr & TSINTR_TT0) {
6128 spin_lock(&adapter->tmreg_lock);
Arnd Bergmann40c9b072015-09-30 13:26:33 +02006129 ts = timespec64_add(adapter->perout[0].start,
6130 adapter->perout[0].period);
6131 /* u32 conversion of tv_sec is safe until y2106 */
Richard Cochran720db4f2014-11-21 20:51:26 +00006132 wr32(E1000_TRGTTIML0, ts.tv_nsec);
Arnd Bergmann40c9b072015-09-30 13:26:33 +02006133 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
Richard Cochran720db4f2014-11-21 20:51:26 +00006134 tsauxc = rd32(E1000_TSAUXC);
6135 tsauxc |= TSAUXC_EN_TT0;
6136 wr32(E1000_TSAUXC, tsauxc);
6137 adapter->perout[0].start = ts;
6138 spin_unlock(&adapter->tmreg_lock);
6139 ack |= TSINTR_TT0;
6140 }
6141
6142 if (tsicr & TSINTR_TT1) {
6143 spin_lock(&adapter->tmreg_lock);
Arnd Bergmann40c9b072015-09-30 13:26:33 +02006144 ts = timespec64_add(adapter->perout[1].start,
6145 adapter->perout[1].period);
Richard Cochran720db4f2014-11-21 20:51:26 +00006146 wr32(E1000_TRGTTIML1, ts.tv_nsec);
Arnd Bergmann40c9b072015-09-30 13:26:33 +02006147 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
Richard Cochran720db4f2014-11-21 20:51:26 +00006148 tsauxc = rd32(E1000_TSAUXC);
6149 tsauxc |= TSAUXC_EN_TT1;
6150 wr32(E1000_TSAUXC, tsauxc);
6151 adapter->perout[1].start = ts;
6152 spin_unlock(&adapter->tmreg_lock);
6153 ack |= TSINTR_TT1;
6154 }
6155
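	/* auxiliary timestamp events: hand the latched time to the PTP core
	 * as an external timestamp; the ULL constant promotes the 32-bit
	 * seconds value before the multiply so it cannot overflow.
	 */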
6156 if (tsicr & TSINTR_AUTT0) {
6157 nsec = rd32(E1000_AUXSTMPL0);
6158 sec = rd32(E1000_AUXSTMPH0);
6159 event.type = PTP_CLOCK_EXTTS;
6160 event.index = 0;
6161 event.timestamp = sec * 1000000000ULL + nsec;
6162 ptp_clock_event(adapter->ptp_clock, &event);
6163 ack |= TSINTR_AUTT0;
6164 }
6165
6166 if (tsicr & TSINTR_AUTT1) {
6167 nsec = rd32(E1000_AUXSTMPL1);
6168 sec = rd32(E1000_AUXSTMPH1);
6169 event.type = PTP_CLOCK_EXTTS;
6170 event.index = 1;
6171 event.timestamp = sec * 1000000000ULL + nsec;
6172 ptp_clock_event(adapter->ptp_clock, &event);
6173 ack |= TSINTR_AUTT1;
6174 }
6175
Richard Cochran00c65572014-11-21 20:51:20 +00006176 /* acknowledge the interrupts */
6177 wr32(E1000_TSICR, ack);
Richard Cochran61d7f752014-11-21 20:51:10 +00006178}
6179
Auke Kok9d5c8242008-01-24 02:22:38 -08006180static irqreturn_t igb_msix_other(int irq, void *data)
6181{
Alexander Duyck047e0032009-10-27 15:49:27 +00006182 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08006183 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07006184 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07006185 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00006186
Alexander Duyck7f081d42010-01-07 17:41:00 +00006187 if (icr & E1000_ICR_DRSTA)
6188 schedule_work(&adapter->reset_task);
6189
Alexander Duyck047e0032009-10-27 15:49:27 +00006190 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00006191 /* HW is reporting DMA is out of sync */
6192 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00006193		/* The DMA Out of Sync is also an indication of a spoof event
6194 * in IOV mode. Check the Wrong VM Behavior register to
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006195 * see if it is really a spoof event.
6196 */
Greg Rose13800462010-11-06 02:08:26 +00006197 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00006198 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006199
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006200 /* Check for a mailbox event */
6201 if (icr & E1000_ICR_VMMB)
6202 igb_msg_task(adapter);
6203
6204 if (icr & E1000_ICR_LSC) {
6205 hw->mac.get_link_status = 1;
6206 /* guard against interrupt when we're going down */
6207 if (!test_bit(__IGB_DOWN, &adapter->state))
6208 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6209 }
6210
Richard Cochran61d7f752014-11-21 20:51:10 +00006211 if (icr & E1000_ICR_TS)
6212 igb_tsync_interrupt(adapter);
Matthew Vick1f6e8172012-08-18 07:26:33 +00006213
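	/* re-arm only the 'other causes' vector; ring vectors are re-armed
	 * from their NAPI handlers via igb_ring_irq_enable()
	 */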
PJ Waskiewicz844290e2008-06-27 11:00:39 -07006214 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08006215
6216 return IRQ_HANDLED;
6217}
6218
Alexander Duyck047e0032009-10-27 15:49:27 +00006219static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08006220{
Alexander Duyck26b39272010-02-17 01:00:41 +00006221 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00006222 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08006223
Alexander Duyck047e0032009-10-27 15:49:27 +00006224 if (!q_vector->set_itr)
6225 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00006226
Alexander Duyck047e0032009-10-27 15:49:27 +00006227 if (!itr_val)
6228 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07006229
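	/* the 82575 has no EITR counter-ignore bit, so the interval is
	 * written to both halves of the register; later MACs set CNT_IGNR,
	 * presumably so the new interval applies regardless of the running
	 * interrupt counter.
	 */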
Alexander Duyck26b39272010-02-17 01:00:41 +00006230 if (adapter->hw.mac.type == e1000_82575)
6231 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07006232 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00006233 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00006234
6235 writel(itr_val, q_vector->itr_register);
6236 q_vector->set_itr = 0;
6237}
6238
6239static irqreturn_t igb_msix_ring(int irq, void *data)
6240{
6241 struct igb_q_vector *q_vector = data;
6242
6243 /* Write the ITR value calculated from the previous interrupt. */
6244 igb_write_itr(q_vector);
6245
6246 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07006247
Auke Kok9d5c8242008-01-24 02:22:38 -08006248 return IRQ_HANDLED;
6249}
6250
Jeff Kirsher421e02f2008-10-17 11:08:31 -07006251#ifdef CONFIG_IGB_DCA
Alexander Duyck6a050042012-09-25 00:31:27 +00006252static void igb_update_tx_dca(struct igb_adapter *adapter,
6253 struct igb_ring *tx_ring,
6254 int cpu)
6255{
6256 struct e1000_hw *hw = &adapter->hw;
6257 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6258
6259 if (hw->mac.type != e1000_82575)
6260 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6261
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006262 /* We can enable relaxed ordering for reads, but not writes when
Alexander Duyck6a050042012-09-25 00:31:27 +00006263 * DCA is enabled. This is due to a known issue in some chipsets
6264 * which will cause the DCA tag to be cleared.
6265 */
6266 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6267 E1000_DCA_TXCTRL_DATA_RRO_EN |
6268 E1000_DCA_TXCTRL_DESC_DCA_EN;
6269
6270 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6271}
6272
6273static void igb_update_rx_dca(struct igb_adapter *adapter,
6274 struct igb_ring *rx_ring,
6275 int cpu)
6276{
6277 struct e1000_hw *hw = &adapter->hw;
6278 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6279
6280 if (hw->mac.type != e1000_82575)
6281 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6282
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006283 /* We can enable relaxed ordering for reads, but not writes when
Alexander Duyck6a050042012-09-25 00:31:27 +00006284 * DCA is enabled. This is due to a known issue in some chipsets
6285 * which will cause the DCA tag to be cleared.
6286 */
6287 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6288 E1000_DCA_RXCTRL_DESC_DCA_EN;
6289
6290 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6291}
6292
Alexander Duyck047e0032009-10-27 15:49:27 +00006293static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006294{
Alexander Duyck047e0032009-10-27 15:49:27 +00006295 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006296 int cpu = get_cpu();
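	/* get_cpu() disables preemption, keeping the CPU number valid
	 * until the matching put_cpu() below
	 */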
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006297
Alexander Duyck047e0032009-10-27 15:49:27 +00006298 if (q_vector->cpu == cpu)
6299 goto out_no_update;
6300
Alexander Duyck6a050042012-09-25 00:31:27 +00006301 if (q_vector->tx.ring)
6302 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6303
6304 if (q_vector->rx.ring)
6305 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6306
Alexander Duyck047e0032009-10-27 15:49:27 +00006307 q_vector->cpu = cpu;
6308out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006309 put_cpu();
6310}
6311
6312static void igb_setup_dca(struct igb_adapter *adapter)
6313{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00006314 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006315 int i;
6316
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07006317 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006318 return;
6319
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00006320 /* Always use CB2 mode, difference is masked in the CB driver. */
6321 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6322
Alexander Duyck047e0032009-10-27 15:49:27 +00006323 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00006324 adapter->q_vector[i]->cpu = -1;
6325 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006326 }
6327}
6328
6329static int __igb_notify_dca(struct device *dev, void *data)
6330{
6331 struct net_device *netdev = dev_get_drvdata(dev);
6332 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00006333 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006334 struct e1000_hw *hw = &adapter->hw;
6335 unsigned long event = *(unsigned long *)data;
6336
6337 switch (event) {
6338 case DCA_PROVIDER_ADD:
6339 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07006340 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006341 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006342 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08006343 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00006344 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006345 igb_setup_dca(adapter);
6346 break;
6347 }
6348 /* Fall Through since DCA is disabled. */
6349 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07006350 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006351 /* without this a class_device is left
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006352 * hanging around in the sysfs model
6353 */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006354 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00006355 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07006356 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08006357 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006358 }
6359 break;
6360 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08006361
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006362 return 0;
6363}
6364
6365static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006366 void *p)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006367{
6368 int ret_val;
6369
6370 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006371 __igb_notify_dca);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07006372
6373 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6374}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07006375#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08006376
Greg Rose0224d662011-10-14 02:57:14 +00006377#ifdef CONFIG_PCI_IOV
6378static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6379{
6380 unsigned char mac_addr[ETH_ALEN];
Greg Rose0224d662011-10-14 02:57:14 +00006381
Mitch A Williams5ac6f912013-01-18 08:57:20 +00006382 eth_zero_addr(mac_addr);
Greg Rose0224d662011-10-14 02:57:14 +00006383 igb_set_vf_mac(adapter, vf, mac_addr);
6384
Lior Levy70ea4782013-03-03 20:27:48 +00006385 /* By default spoof check is enabled for all VFs */
6386 adapter->vf_data[vf].spoofchk_enabled = true;
6387
Corinna Vinschen1b8b0622018-01-17 11:53:39 +01006388 /* By default VFs are not trusted */
6389 adapter->vf_data[vf].trusted = false;
6390
Stefan Assmannf5571472012-08-18 04:06:11 +00006391 return 0;
Greg Rose0224d662011-10-14 02:57:14 +00006392}
6393
Greg Rose0224d662011-10-14 02:57:14 +00006394#endif
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006395static void igb_ping_all_vfs(struct igb_adapter *adapter)
6396{
6397 struct e1000_hw *hw = &adapter->hw;
6398 u32 ping;
6399 int i;
6400
6401 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6402 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00006403 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006404 ping |= E1000_VT_MSGTYPE_CTS;
6405 igb_write_mbx(hw, &ping, 1, i);
6406 }
6407}
6408
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006409static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6410{
6411 struct e1000_hw *hw = &adapter->hw;
6412 u32 vmolr = rd32(E1000_VMOLR(vf));
6413 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6414
Alexander Duyckd85b90042010-09-22 17:56:20 +00006415 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006416 IGB_VF_FLAG_MULTI_PROMISC);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006417 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6418
6419 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6420 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00006421 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006422 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6423 } else {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006424 /* if we have hashes and we are clearing a multicast promisc
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006425 * flag we need to write the hashes to the MTA as this step
6426 * was previously skipped
6427 */
6428 if (vf_data->num_vf_mc_hashes > 30) {
6429 vmolr |= E1000_VMOLR_MPME;
6430 } else if (vf_data->num_vf_mc_hashes) {
6431 int j;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00006432
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006433 vmolr |= E1000_VMOLR_ROMPE;
6434 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6435 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6436 }
6437 }
6438
6439 wr32(E1000_VMOLR(vf), vmolr);
6440
6441 /* there are flags left unprocessed, likely not supported */
6442 if (*msgbuf & E1000_VT_MSGINFO_MASK)
6443 return -EINVAL;
6444
6445 return 0;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006446}
6447
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006448static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6449 u32 *msgbuf, u32 vf)
6450{
6451 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6452 u16 *hash_list = (u16 *)&msgbuf[1];
6453 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6454 int i;
6455
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006456 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006457	 * to this VF for later use to restore when the PF multicast
6458 * list changes
6459 */
6460 vf_data->num_vf_mc_hashes = n;
6461
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006462 /* only up to 30 hash values supported */
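	/* likely why: the 16-dword VF mailbox minus its header word leaves
	 * 15 dwords, i.e. room for exactly 30 u16 hash values
	 */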
6463 if (n > 30)
6464 n = 30;
6465
6466 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006467 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07006468 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006469
6470 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006471 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006472
6473 return 0;
6474}
6475
6476static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6477{
6478 struct e1000_hw *hw = &adapter->hw;
6479 struct vf_data_storage *vf_data;
6480 int i, j;
6481
6482 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006483 u32 vmolr = rd32(E1000_VMOLR(i));
Carolyn Wyborny9005df32014-04-11 01:45:34 +00006484
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006485 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6486
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006487 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00006488
6489 if ((vf_data->num_vf_mc_hashes > 30) ||
6490 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6491 vmolr |= E1000_VMOLR_MPME;
6492 } else if (vf_data->num_vf_mc_hashes) {
6493 vmolr |= E1000_VMOLR_ROMPE;
6494 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6495 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6496 }
6497 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006498 }
6499}
6500
6501static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6502{
6503 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck16903ca2016-01-06 23:11:18 -08006504 u32 pool_mask, vlvf_mask, i;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006505
Alexander Duyck16903ca2016-01-06 23:11:18 -08006506 /* create mask for VF and other pools */
6507 pool_mask = E1000_VLVF_POOLSEL_MASK;
Jacob Kellera51d8c22016-04-13 16:08:28 -07006508 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
Alexander Duyck16903ca2016-01-06 23:11:18 -08006509
6510 /* drop PF from pool bits */
Jacob Kellera51d8c22016-04-13 16:08:28 -07006511 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6512 adapter->vfs_allocated_count);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006513
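	/* three outcomes per matching VLVF entry: if another VF still uses
	 * the VLAN, only this VF's pool bit is dropped; if only the PF
	 * remains, the entry is released but the VFTA bit stays set; if no
	 * pool remains, the VFTA bit is cleared as well
	 */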
6514 /* Find the vlan filter for this id */
Alexander Duyck16903ca2016-01-06 23:11:18 -08006515 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6516 u32 vlvf = rd32(E1000_VLVF(i));
6517 u32 vfta_mask, vid, vfta;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006518
6519 /* remove the vf from the pool */
Alexander Duyck16903ca2016-01-06 23:11:18 -08006520 if (!(vlvf & vlvf_mask))
6521 continue;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006522
Alexander Duyck16903ca2016-01-06 23:11:18 -08006523 /* clear out bit from VLVF */
6524 vlvf ^= vlvf_mask;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006525
Alexander Duyck16903ca2016-01-06 23:11:18 -08006526 /* if other pools are present, just remove ourselves */
6527 if (vlvf & pool_mask)
6528 goto update_vlvfb;
6529
6530 /* if PF is present, leave VFTA */
6531 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6532 goto update_vlvf;
6533
6534 vid = vlvf & E1000_VLVF_VLANID_MASK;
Jacob Kellera51d8c22016-04-13 16:08:28 -07006535 vfta_mask = BIT(vid % 32);
Alexander Duyck16903ca2016-01-06 23:11:18 -08006536
6537 /* clear bit from VFTA */
6538 vfta = adapter->shadow_vfta[vid / 32];
6539 if (vfta & vfta_mask)
6540 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6541update_vlvf:
6542 /* clear pool selection enable */
6543 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6544 vlvf &= E1000_VLVF_POOLSEL_MASK;
6545 else
6546 vlvf = 0;
6547update_vlvfb:
6548 /* clear pool bits */
6549 wr32(E1000_VLVF(i), vlvf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006550 }
6551}
6552
Alexander Duyck16903ca2016-01-06 23:11:18 -08006553static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006554{
Alexander Duyck16903ca2016-01-06 23:11:18 -08006555 u32 vlvf;
6556 int idx;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006557
Alexander Duyck16903ca2016-01-06 23:11:18 -08006558	/* shortcut the special case */
6559 if (vlan == 0)
6560 return 0;
Alexander Duyck51466232009-10-27 23:47:35 +00006561
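	/* index 0 doubles as the 'not found' return value, so the search
	 * below deliberately never inspects or returns VLVF entry 0
	 */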
Alexander Duyck16903ca2016-01-06 23:11:18 -08006562 /* Search for the VLAN id in the VLVF entries */
6563 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6564 vlvf = rd32(E1000_VLVF(idx));
6565 if ((vlvf & VLAN_VID_MASK) == vlan)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006566 break;
6567 }
6568
Alexander Duyck16903ca2016-01-06 23:11:18 -08006569 return idx;
6570}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006571
Jacob Keller8008f682016-04-13 16:08:29 -07006572static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
Alexander Duyck16903ca2016-01-06 23:11:18 -08006573{
6574 struct e1000_hw *hw = &adapter->hw;
6575 u32 bits, pf_id;
6576 int idx;
Alexander Duyckae641bd2009-09-03 14:49:33 +00006577
Alexander Duyck16903ca2016-01-06 23:11:18 -08006578 idx = igb_find_vlvf_entry(hw, vid);
6579 if (!idx)
6580 return;
Alexander Duyckae641bd2009-09-03 14:49:33 +00006581
Alexander Duyck16903ca2016-01-06 23:11:18 -08006582 /* See if any other pools are set for this VLAN filter
6583 * entry other than the PF.
6584 */
6585 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
Jacob Kellera51d8c22016-04-13 16:08:28 -07006586 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
Alexander Duyck16903ca2016-01-06 23:11:18 -08006587 bits &= rd32(E1000_VLVF(idx));
Carolyn Wyborny9005df32014-04-11 01:45:34 +00006588
Alexander Duyck16903ca2016-01-06 23:11:18 -08006589 /* Disable the filter so this falls into the default pool. */
6590 if (!bits) {
6591 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
Jacob Kellera51d8c22016-04-13 16:08:28 -07006592 wr32(E1000_VLVF(idx), BIT(pf_id));
Alexander Duyck16903ca2016-01-06 23:11:18 -08006593 else
6594 wr32(E1000_VLVF(idx), 0);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006595 }
Greg Rose6f3dc3192013-03-26 06:19:41 +00006596}
6597
Alexander Duycka15d9252016-01-06 23:11:11 -08006598static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6599 bool add, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006600{
Alexander Duycka15d9252016-01-06 23:11:11 -08006601 int pf_id = adapter->vfs_allocated_count;
Greg Rose6f3dc3192013-03-26 06:19:41 +00006602 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka15d9252016-01-06 23:11:11 -08006603 int err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006604
Alexander Duycka15d9252016-01-06 23:11:11 -08006605	/* If VLAN overlaps with one the PF is currently monitoring, make
6606 * sure that we are able to allocate a VLVF entry. This may be
6607 * redundant but it guarantees PF will maintain visibility to
6608 * the VLAN.
Greg Rose6f3dc3192013-03-26 06:19:41 +00006609 */
Alexander Duyck16903ca2016-01-06 23:11:18 -08006610 if (add && test_bit(vid, adapter->active_vlans)) {
Alexander Duycka15d9252016-01-06 23:11:11 -08006611 err = igb_vfta_set(hw, vid, pf_id, true, false);
6612 if (err)
6613 return err;
6614 }
Greg Rose6f3dc3192013-03-26 06:19:41 +00006615
Alexander Duycka15d9252016-01-06 23:11:11 -08006616 err = igb_vfta_set(hw, vid, vf, add, false);
Greg Rose6f3dc3192013-03-26 06:19:41 +00006617
Alexander Duyck16903ca2016-01-06 23:11:18 -08006618 if (add && !err)
6619 return err;
Greg Rose6f3dc3192013-03-26 06:19:41 +00006620
Alexander Duyck16903ca2016-01-06 23:11:18 -08006621 /* If we failed to add the VF VLAN or we are removing the VF VLAN
6622 * we may need to drop the PF pool bit in order to allow us to free
6623 * up the VLVF resources.
Greg Rose6f3dc3192013-03-26 06:19:41 +00006624 */
Alexander Duyck16903ca2016-01-06 23:11:18 -08006625 if (test_bit(vid, adapter->active_vlans) ||
6626 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6627 igb_update_pf_vlvf(adapter, vid);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00006628
Greg Rose6f3dc3192013-03-26 06:19:41 +00006629 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006630}
6631
6632static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
6633{
6634 struct e1000_hw *hw = &adapter->hw;
6635
6636 if (vid)
6637 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
6638 else
6639 wr32(E1000_VMVIR(vf), 0);
6640}
6641
Alexander Duycka15d9252016-01-06 23:11:11 -08006642static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
6643 u16 vlan, u8 qos)
6644{
6645 int err;
6646
6647 err = igb_set_vf_vlan(adapter, vlan, true, vf);
6648 if (err)
6649 return err;
6650
6651 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
6652 igb_set_vmolr(adapter, vf, !vlan);
6653
6654 /* revoke access to previous VLAN */
6655 if (vlan != adapter->vf_data[vf].pf_vlan)
6656 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6657 false, vf);
6658
6659 adapter->vf_data[vf].pf_vlan = vlan;
6660 adapter->vf_data[vf].pf_qos = qos;
Corinna Vinschen030f9f52016-01-28 13:53:23 +01006661 igb_set_vf_vlan_strip(adapter, vf, true);
Alexander Duycka15d9252016-01-06 23:11:11 -08006662 dev_info(&adapter->pdev->dev,
6663 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
6664 if (test_bit(__IGB_DOWN, &adapter->state)) {
6665 dev_warn(&adapter->pdev->dev,
6666 "The VF VLAN has been set, but the PF device is not up.\n");
6667 dev_warn(&adapter->pdev->dev,
6668 "Bring the PF device up before attempting to use the VF device.\n");
6669 }
6670
6671 return err;
6672}
6673
6674static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
6675{
6676 /* Restore tagless access via VLAN 0 */
6677 igb_set_vf_vlan(adapter, 0, true, vf);
6678
6679 igb_set_vmvir(adapter, 0, vf);
6680 igb_set_vmolr(adapter, vf, true);
6681
6682 /* Remove any PF assigned VLAN */
6683 if (adapter->vf_data[vf].pf_vlan)
6684 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6685 false, vf);
6686
6687 adapter->vf_data[vf].pf_vlan = 0;
6688 adapter->vf_data[vf].pf_qos = 0;
Corinna Vinschen030f9f52016-01-28 13:53:23 +01006689 igb_set_vf_vlan_strip(adapter, vf, false);
Alexander Duycka15d9252016-01-06 23:11:11 -08006690
6691 return 0;
6692}
6693
Moshe Shemesh79aab092016-09-22 12:11:15 +03006694static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
6695 u16 vlan, u8 qos, __be16 vlan_proto)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006696{
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006697 struct igb_adapter *adapter = netdev_priv(netdev);
6698
6699 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
6700 return -EINVAL;
Alexander Duycka15d9252016-01-06 23:11:11 -08006701
Moshe Shemesh79aab092016-09-22 12:11:15 +03006702 if (vlan_proto != htons(ETH_P_8021Q))
6703 return -EPROTONOSUPPORT;
6704
Alexander Duycka15d9252016-01-06 23:11:11 -08006705 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
6706 igb_disable_port_vlan(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006707}
6708
Alexander Duycka15d9252016-01-06 23:11:11 -08006709static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006710{
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006711 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6712 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
Corinna Vinschen030f9f52016-01-28 13:53:23 +01006713 int ret;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006714
Alexander Duycka15d9252016-01-06 23:11:11 -08006715 if (adapter->vf_data[vf].pf_vlan)
6716 return -1;
Mitch A Williams5ac6f912013-01-18 08:57:20 +00006717
Alexander Duycka15d9252016-01-06 23:11:11 -08006718 /* VLAN 0 is a special case, don't allow it to be removed */
6719 if (!vid && !add)
6720 return 0;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00006721
Corinna Vinschen030f9f52016-01-28 13:53:23 +01006722 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
6723 if (!ret)
6724 igb_set_vf_vlan_strip(adapter, vf, !!vid);
6725 return ret;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006726}
6727
6728static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
6729{
Alexander Duycka15d9252016-01-06 23:11:11 -08006730 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006731
Alexander Duycka15d9252016-01-06 23:11:11 -08006732 /* clear flags - except flag that indicates PF has set the MAC */
6733 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
6734 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006735
6736 /* reset vlans for device */
6737 igb_clear_vf_vfta(adapter, vf);
Alexander Duycka15d9252016-01-06 23:11:11 -08006738 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
6739 igb_set_vmvir(adapter, vf_data->pf_vlan |
6740 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
6741 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
Corinna Vinschen030f9f52016-01-28 13:53:23 +01006742 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006743
6744 /* reset multicast table array for vf */
6745 adapter->vf_data[vf].num_vf_mc_hashes = 0;
6746
6747 /* Flush and reset the mta with the new values */
6748 igb_set_rx_mode(adapter->netdev);
6749}
6750
6751static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00006752{
6753 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
6754
6755 /* clear mac address as we were hotplug removed/added */
6756 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
6757 eth_zero_addr(vf_mac);
6758
6759 /* process remaining reset events */
6760 igb_vf_reset(adapter, vf);
6761}
6762
6763static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006764{
6765 struct e1000_hw *hw = &adapter->hw;
6766 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
6767 u32 reg, msgbuf[3];
6768 u8 *addr = (u8 *)(&msgbuf[1]);
6769
6770 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00006771 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006772
6773 /* set vf mac address */
Yury Kylulin83c21332017-03-07 11:20:25 +03006774 igb_set_vf_mac(adapter, vf, vf_mac);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006775
6776 /* enable transmit and receive for vf */
6777 reg = rd32(E1000_VFTE);
Jacob Kellera51d8c22016-04-13 16:08:28 -07006778 wr32(E1000_VFTE, reg | BIT(vf));
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006779 reg = rd32(E1000_VFRE);
Jacob Kellera51d8c22016-04-13 16:08:28 -07006780 wr32(E1000_VFRE, reg | BIT(vf));
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006781
Greg Rose8fa7e0f2010-11-06 05:43:21 +00006782 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006783
6784 /* reply to reset with ack and vf mac address */
Alexander Graf6ddbc4c2014-10-09 05:33:55 +00006785 if (!is_zero_ether_addr(vf_mac)) {
6786 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6787 memcpy(addr, vf_mac, ETH_ALEN);
6788 } else {
6789 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
6790 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006791 igb_write_mbx(hw, msgbuf, 3, vf);
6792}
6793
Yury Kylulin83c21332017-03-07 11:20:25 +03006794static void igb_flush_mac_table(struct igb_adapter *adapter)
6795{
6796 struct e1000_hw *hw = &adapter->hw;
6797 int i;
6798
6799 for (i = 0; i < hw->mac.rar_entry_count; i++) {
6800 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
6801 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
6802 adapter->mac_table[i].queue = 0;
6803 igb_rar_set_index(adapter, i);
6804 }
6805}
6806
6807static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
6808{
6809 struct e1000_hw *hw = &adapter->hw;
6810 /* do not count rar entries reserved for VFs MAC addresses */
6811 int rar_entries = hw->mac.rar_entry_count -
6812 adapter->vfs_allocated_count;
6813 int i, count = 0;
6814
6815 for (i = 0; i < rar_entries; i++) {
6816 /* do not count default entries */
6817 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
6818 continue;
6819
6820 /* do not count "in use" entries for different queues */
6821 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
6822 (adapter->mac_table[i].queue != queue))
6823 continue;
6824
6825 count++;
6826 }
6827
6828 return count;
6829}
6830
6831/* Set default MAC address for the PF in the first RAR entry */
6832static void igb_set_default_mac_filter(struct igb_adapter *adapter)
6833{
6834 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
6835
6836 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
6837 mac_table->queue = adapter->vfs_allocated_count;
6838 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
6839
6840 igb_rar_set_index(adapter, 0);
6841}
6842
Colin Ian Kingb476dea2017-04-27 18:59:11 +01006843static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
6844 const u8 queue)
Yury Kylulin83c21332017-03-07 11:20:25 +03006845{
6846 struct e1000_hw *hw = &adapter->hw;
6847 int rar_entries = hw->mac.rar_entry_count -
6848 adapter->vfs_allocated_count;
6849 int i;
6850
6851 if (is_zero_ether_addr(addr))
6852 return -EINVAL;
6853
6854 /* Search for the first empty entry in the MAC table.
6855 * Do not touch entries at the end of the table reserved for the VF MAC
6856 * addresses.
6857 */
6858 for (i = 0; i < rar_entries; i++) {
6859 if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
6860 continue;
6861
6862 ether_addr_copy(adapter->mac_table[i].addr, addr);
6863 adapter->mac_table[i].queue = queue;
6864 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE;
6865
6866 igb_rar_set_index(adapter, i);
6867 return i;
6868 }
6869
6870 return -ENOSPC;
6871}
6872
Colin Ian Kingb476dea2017-04-27 18:59:11 +01006873static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
6874 const u8 queue)
Yury Kylulin83c21332017-03-07 11:20:25 +03006875{
6876 struct e1000_hw *hw = &adapter->hw;
6877 int rar_entries = hw->mac.rar_entry_count -
6878 adapter->vfs_allocated_count;
6879 int i;
6880
6881 if (is_zero_ether_addr(addr))
6882 return -EINVAL;
6883
6884 /* Search for matching entry in the MAC table based on given address
6885 * and queue. Do not touch entries at the end of the table reserved
6886 * for the VF MAC addresses.
6887 */
6888 for (i = 0; i < rar_entries; i++) {
6889 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
6890 continue;
6891 if (adapter->mac_table[i].queue != queue)
6892 continue;
6893 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
6894 continue;
6895
6896 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
6897 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
6898 adapter->mac_table[i].queue = 0;
6899
6900 igb_rar_set_index(adapter, i);
6901 return 0;
6902 }
6903
6904 return -ENOENT;
6905}
6906
6907static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
6908{
6909 struct igb_adapter *adapter = netdev_priv(netdev);
6910 int ret;
6911
6912 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
6913
6914 return min_t(int, ret, 0);
6915}
6916
6917static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
6918{
6919 struct igb_adapter *adapter = netdev_priv(netdev);
6920
6921 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
6922
6923 return 0;
6924}
6925
Colin Ian Kingb476dea2017-04-27 18:59:11 +01006926static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
6927 const u32 info, const u8 *addr)
Yury Kylulin4827cc32017-03-07 11:20:26 +03006928{
6929 struct pci_dev *pdev = adapter->pdev;
6930 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6931 struct list_head *pos;
6932 struct vf_mac_filter *entry = NULL;
6933 int ret = 0;
6934
6935 switch (info) {
6936 case E1000_VF_MAC_FILTER_CLR:
6937 /* remove all unicast MAC filters related to the current VF */
6938 list_for_each(pos, &adapter->vf_macs.l) {
6939 entry = list_entry(pos, struct vf_mac_filter, l);
6940 if (entry->vf == vf) {
6941 entry->vf = -1;
6942 entry->free = true;
6943 igb_del_mac_filter(adapter, entry->vf_mac, vf);
6944 }
6945 }
6946 break;
6947 case E1000_VF_MAC_FILTER_ADD:
Corinna Vinschen1b8b0622018-01-17 11:53:39 +01006948 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
6949 !vf_data->trusted) {
Yury Kylulin4827cc32017-03-07 11:20:26 +03006950 dev_warn(&pdev->dev,
6951 "VF %d requested MAC filter but is administratively denied\n",
6952 vf);
6953 return -EINVAL;
6954 }
Yury Kylulin4827cc32017-03-07 11:20:26 +03006955 if (!is_valid_ether_addr(addr)) {
6956 dev_warn(&pdev->dev,
6957 "VF %d attempted to set invalid MAC filter\n",
6958 vf);
6959 return -EINVAL;
6960 }
6961
6962 /* try to find empty slot in the list */
6963 list_for_each(pos, &adapter->vf_macs.l) {
6964 entry = list_entry(pos, struct vf_mac_filter, l);
6965 if (entry->free)
6966 break;
6967 }
6968
6969 if (entry && entry->free) {
6970 entry->free = false;
6971 entry->vf = vf;
6972 ether_addr_copy(entry->vf_mac, addr);
6973
6974 ret = igb_add_mac_filter(adapter, addr, vf);
6975 ret = min_t(int, ret, 0);
6976 } else {
6977 ret = -ENOSPC;
6978 }
6979
6980 if (ret == -ENOSPC)
6981 dev_warn(&pdev->dev,
6982 "VF %d has requested MAC filter but there is no space for it\n",
6983 vf);
6984 break;
6985 default:
6986 ret = -EINVAL;
6987 break;
6988 }
6989
6990 return ret;
6991}
6992
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006993static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
6994{
Yury Kylulin4827cc32017-03-07 11:20:26 +03006995 struct pci_dev *pdev = adapter->pdev;
6996 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6997 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
6998
Jeff Kirsherb980ac12013-02-23 07:29:56 +00006999 /* The VF MAC Address is stored in a packed array of bytes
Greg Rosede42edd2010-07-01 13:39:23 +00007000	 * starting at the second 32-bit word of the msg array
7001 */
Yury Kylulin4827cc32017-03-07 11:20:26 +03007002 unsigned char *addr = (unsigned char *)&msg[1];
7003 int ret = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007004
Yury Kylulin4827cc32017-03-07 11:20:26 +03007005 if (!info) {
Corinna Vinschen1b8b0622018-01-17 11:53:39 +01007006 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7007 !vf_data->trusted) {
Yury Kylulin4827cc32017-03-07 11:20:26 +03007008 dev_warn(&pdev->dev,
7009 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7010 vf);
7011 return -EINVAL;
7012 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007013
Yury Kylulin4827cc32017-03-07 11:20:26 +03007014 if (!is_valid_ether_addr(addr)) {
7015 dev_warn(&pdev->dev,
7016 "VF %d attempted to set invalid MAC\n",
7017 vf);
7018 return -EINVAL;
7019 }
7020
7021 ret = igb_set_vf_mac(adapter, vf, addr);
7022 } else {
7023 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7024 }
7025
7026 return ret;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007027}
7028
7029static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7030{
7031 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007032 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007033 u32 msg = E1000_VT_MSGTYPE_NACK;
7034
7035 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007036 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7037 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007038 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007039 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007040 }
7041}
7042
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007043static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007044{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007045 struct pci_dev *pdev = adapter->pdev;
7046 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007047 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007048 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007049 s32 retval;
7050
Greg Edwards46b3bb92017-06-28 09:22:26 -06007051 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
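	/* the final 'false' appears to leave the mailbox locked after the
	 * read; every path below must either reply via igb_write_mbx()
	 * (which unlocks it) or branch to the unlock label.
	 */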
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007052
Alexander Duyckfef45f42009-12-11 22:57:34 -08007053 if (retval) {
7054 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007055 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08007056 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7057 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
Greg Edwards46b3bb92017-06-28 09:22:26 -06007058 goto unlock;
Alexander Duyckfef45f42009-12-11 22:57:34 -08007059 goto out;
7060 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007061
7062 /* this is a message we already processed, do nothing */
7063 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Greg Edwards46b3bb92017-06-28 09:22:26 -06007064 goto unlock;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007065
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007066 /* until the vf completes a reset it should not be
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007067 * allowed to start any configuration.
7068 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007069 if (msgbuf[0] == E1000_VF_RESET) {
Greg Edwards46b3bb92017-06-28 09:22:26 -06007070 /* unlocks mailbox */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007071 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007072 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007073 }
7074
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007075 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08007076 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
Greg Edwards46b3bb92017-06-28 09:22:26 -06007077 goto unlock;
Alexander Duyckfef45f42009-12-11 22:57:34 -08007078 retval = -1;
7079 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007080 }
7081
7082 switch ((msgbuf[0] & 0xFFFF)) {
7083 case E1000_VF_SET_MAC_ADDR:
Yury Kylulin4827cc32017-03-07 11:20:26 +03007084 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007085 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00007086 case E1000_VF_SET_PROMISC:
7087 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7088 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007089 case E1000_VF_SET_MULTICAST:
7090 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7091 break;
7092 case E1000_VF_SET_LPE:
7093 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7094 break;
7095 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00007096 retval = -1;
7097 if (vf_data->pf_vlan)
7098 dev_warn(&pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007099 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7100 vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00007101 else
Alexander Duycka15d9252016-01-06 23:11:11 -08007102 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007103 break;
7104 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00007105 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007106 retval = -1;
7107 break;
7108 }
7109
Alexander Duyckfef45f42009-12-11 22:57:34 -08007110 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7111out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007112 /* notify the VF of the results of what it sent us */
7113 if (retval)
7114 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7115 else
7116 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7117
Greg Edwards46b3bb92017-06-28 09:22:26 -06007118 /* unlocks mailbox */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007119 igb_write_mbx(hw, msgbuf, 1, vf);
Greg Edwards46b3bb92017-06-28 09:22:26 -06007120 return;
7121
7122unlock:
7123 igb_unlock_mbx(hw, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007124}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007125
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00007126static void igb_msg_task(struct igb_adapter *adapter)
7127{
7128 struct e1000_hw *hw = &adapter->hw;
7129 u32 vf;
7130
7131 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7132 /* process any reset requests */
7133 if (!igb_check_for_rst(hw, vf))
7134 igb_vf_reset_event(adapter, vf);
7135
7136 /* process any messages pending */
7137 if (!igb_check_for_msg(hw, vf))
7138 igb_rcv_msg_from_vf(adapter, vf);
7139
7140 /* process any acks */
7141 if (!igb_check_for_ack(hw, vf))
7142 igb_rcv_ack_from_vf(adapter, vf);
7143 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007144}
7145
Auke Kok9d5c8242008-01-24 02:22:38 -08007146/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00007147 * igb_set_uta - Set unicast filter table address
7148 * @adapter: board private structure
Alexander Duyckbf456ab2016-01-06 23:11:43 -08007149 * @set: boolean indicating if we are setting or clearing bits
Alexander Duyck68d480c2009-10-05 06:33:08 +00007150 *
7151 * The unicast table address is a register array of 32-bit registers.
7152 * The table is meant to be used in a way similar to how the MTA is used;
7153 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007154 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
7155 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00007156 **/
Alexander Duyckbf456ab2016-01-06 23:11:43 -08007157static void igb_set_uta(struct igb_adapter *adapter, bool set)
Alexander Duyck68d480c2009-10-05 06:33:08 +00007158{
7159 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckbf456ab2016-01-06 23:11:43 -08007160 u32 uta = set ? ~0 : 0;
Alexander Duyck68d480c2009-10-05 06:33:08 +00007161 int i;
7162
Alexander Duyck68d480c2009-10-05 06:33:08 +00007163 /* we only need to do this if VMDq is enabled */
7164 if (!adapter->vfs_allocated_count)
7165 return;
7166
Alexander Duyckbf456ab2016-01-06 23:11:43 -08007167 for (i = hw->mac.uta_reg_count; i--;)
7168 array_wr32(E1000_UTA, i, uta);
Alexander Duyck68d480c2009-10-05 06:33:08 +00007169}
7170
7171/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007172 * igb_intr_msi - Interrupt Handler
7173 * @irq: interrupt number
7174 * @data: pointer to a network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08007175 **/
7176static irqreturn_t igb_intr_msi(int irq, void *data)
7177{
Alexander Duyck047e0032009-10-27 15:49:27 +00007178 struct igb_adapter *adapter = data;
7179 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08007180 struct e1000_hw *hw = &adapter->hw;
7181 /* read ICR disables interrupts using IAM */
7182 u32 icr = rd32(E1000_ICR);
7183
Alexander Duyck047e0032009-10-27 15:49:27 +00007184 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08007185
Alexander Duyck7f081d42010-01-07 17:41:00 +00007186 if (icr & E1000_ICR_DRSTA)
7187 schedule_work(&adapter->reset_task);
7188
Alexander Duyck047e0032009-10-27 15:49:27 +00007189 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00007190 /* HW is reporting DMA is out of sync */
7191 adapter->stats.doosync++;
7192 }
7193
Auke Kok9d5c8242008-01-24 02:22:38 -08007194 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7195 hw->mac.get_link_status = 1;
7196 if (!test_bit(__IGB_DOWN, &adapter->state))
7197 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7198 }
7199
Richard Cochran61d7f752014-11-21 20:51:10 +00007200 if (icr & E1000_ICR_TS)
7201 igb_tsync_interrupt(adapter);
Matthew Vick1f6e8172012-08-18 07:26:33 +00007202
Alexander Duyck047e0032009-10-27 15:49:27 +00007203 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08007204
7205 return IRQ_HANDLED;
7206}
7207
7208/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007209 * igb_intr - Legacy Interrupt Handler
7210 * @irq: interrupt number
7211 * @data: pointer to a network interface device structure
Auke Kok9d5c8242008-01-24 02:22:38 -08007212 **/
7213static irqreturn_t igb_intr(int irq, void *data)
7214{
Alexander Duyck047e0032009-10-27 15:49:27 +00007215 struct igb_adapter *adapter = data;
7216 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08007217 struct e1000_hw *hw = &adapter->hw;
7218 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007219 * need for the IMC write
7220 */
Auke Kok9d5c8242008-01-24 02:22:38 -08007221 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08007222
7223 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007224 * not set, then the adapter didn't send an interrupt
7225 */
Auke Kok9d5c8242008-01-24 02:22:38 -08007226 if (!(icr & E1000_ICR_INT_ASSERTED))
7227 return IRQ_NONE;
7228
Alexander Duyck0ba82992011-08-26 07:45:47 +00007229 igb_write_itr(q_vector);
7230
Alexander Duyck7f081d42010-01-07 17:41:00 +00007231 if (icr & E1000_ICR_DRSTA)
7232 schedule_work(&adapter->reset_task);
7233
Alexander Duyck047e0032009-10-27 15:49:27 +00007234 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00007235 /* HW is reporting DMA is out of sync */
7236 adapter->stats.doosync++;
7237 }
7238
Auke Kok9d5c8242008-01-24 02:22:38 -08007239 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7240 hw->mac.get_link_status = 1;
7241 /* guard against interrupt when we're going down */
7242 if (!test_bit(__IGB_DOWN, &adapter->state))
7243 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7244 }
7245
Richard Cochran61d7f752014-11-21 20:51:10 +00007246 if (icr & E1000_ICR_TS)
7247 igb_tsync_interrupt(adapter);
Matthew Vick1f6e8172012-08-18 07:26:33 +00007248
Alexander Duyck047e0032009-10-27 15:49:27 +00007249 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08007250
7251 return IRQ_HANDLED;
7252}
7253
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00007254static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08007255{
Alexander Duyck047e0032009-10-27 15:49:27 +00007256 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08007257 struct e1000_hw *hw = &adapter->hw;
7258
Alexander Duyck0ba82992011-08-26 07:45:47 +00007259 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7260 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7261 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7262 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08007263 else
Alexander Duyck047e0032009-10-27 15:49:27 +00007264 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08007265 }
7266
7267 if (!test_bit(__IGB_DOWN, &adapter->state)) {
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00007268 if (adapter->flags & IGB_FLAG_HAS_MSIX)
Alexander Duyck047e0032009-10-27 15:49:27 +00007269 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08007270 else
7271 igb_irq_enable(adapter);
7272 }
7273}
7274
Auke Kok9d5c8242008-01-24 02:22:38 -08007275/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007276 * igb_poll - NAPI Rx polling callback
7277 * @napi: napi polling structure
7278 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08007279 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07007280static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08007281{
Alexander Duyck047e0032009-10-27 15:49:27 +00007282 struct igb_q_vector *q_vector = container_of(napi,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007283 struct igb_q_vector,
7284 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00007285 bool clean_complete = true;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007286 int work_done = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08007287
Jeff Kirsher421e02f2008-10-17 11:08:31 -07007288#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00007289 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7290 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07007291#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00007292 if (q_vector->tx.ring)
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007293 clean_complete = igb_clean_tx_irq(q_vector, budget);
Auke Kok9d5c8242008-01-24 02:22:38 -08007294
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007295 if (q_vector->rx.ring) {
7296 int cleaned = igb_clean_rx_irq(q_vector, budget);
7297
7298 work_done += cleaned;
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007299 if (cleaned >= budget)
7300 clean_complete = false;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007301 }
Alexander Duyck047e0032009-10-27 15:49:27 +00007302
Alexander Duyck16eb8812011-08-26 07:43:54 +00007303 /* If all work not completed, return budget and keep polling */
7304 if (!clean_complete)
7305 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08007306
Alexander Duyck46544252009-02-19 20:39:04 -08007307 /* If not enough Rx work done, exit the polling mode */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007308 napi_complete_done(napi, work_done);
Alexander Duyck16eb8812011-08-26 07:43:54 +00007309 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08007310
Alexander Duyck16eb8812011-08-26 07:43:54 +00007311 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08007312}
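/* NAPI contract in brief: returning the full budget keeps this vector
 * on the poll list for another pass; returning less reports the Rx
 * work actually done via napi_complete_done() and re-arms the vector's
 * interrupt through igb_ring_irq_enable().
 */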
Al Viro6d8126f2008-03-16 22:23:24 +00007313
Patrick Ohly33af6bc2009-02-12 05:03:43 +00007314/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007315 * igb_clean_tx_irq - Reclaim resources after transmit completes
7316 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007317 * @napi_budget: Used to determine if we are in netpoll
Ben Hutchings49ce9c22012-07-10 10:56:00 +00007318 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007319 * returns true if ring is completely cleaned
Auke Kok9d5c8242008-01-24 02:22:38 -08007320 **/
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007321static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08007322{
Alexander Duyck047e0032009-10-27 15:49:27 +00007323 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00007324 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00007325 struct igb_tx_buffer *tx_buffer;
Alexander Duyckf4128782012-09-13 06:28:01 +00007326 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08007327 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00007328 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00007329 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08007330
Alexander Duyck13fde972011-10-05 13:35:24 +00007331 if (test_bit(__IGB_DOWN, &adapter->state))
7332 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08007333
Alexander Duyck06034642011-08-26 07:44:22 +00007334 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00007335 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00007336 i -= tx_ring->count;
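	/* i is biased by -count so it runs over [ntc - count, 0); the
	 * wrap test in the loop below is then a cheap "!i" compare, and
	 * the real ring index is recovered afterwards with i += count.
	 */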
Auke Kok9d5c8242008-01-24 02:22:38 -08007337
Alexander Duyckf4128782012-09-13 06:28:01 +00007338 do {
7339 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
Alexander Duyck8542db02011-08-26 07:44:43 +00007340
7341 /* if next_to_watch is not set then there is no work pending */
7342 if (!eop_desc)
7343 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00007344
Alexander Duyckf4128782012-09-13 06:28:01 +00007345 /* prevent any other reads prior to eop_desc */
Brian Kingc4cb9912017-11-17 11:05:47 -06007346 smp_rmb();
Alexander Duyckf4128782012-09-13 06:28:01 +00007347
Alexander Duyck13fde972011-10-05 13:35:24 +00007348 /* if DD is not set pending work has not been completed */
7349 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7350 break;
7351
Alexander Duyck8542db02011-08-26 07:44:43 +00007352 /* clear next_to_watch to prevent false hangs */
7353 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00007354
Alexander Duyckebe42d12011-08-26 07:45:09 +00007355 /* update the statistics for this packet */
7356 total_bytes += tx_buffer->bytecount;
7357 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00007358
Alexander Duyckebe42d12011-08-26 07:45:09 +00007359 /* free the skb */
Alexander Duyck7f0ba842016-03-07 09:30:21 -08007360 napi_consume_skb(tx_buffer->skb, napi_budget);
Alexander Duyckebe42d12011-08-26 07:45:09 +00007361
7362 /* unmap skb header data */
7363 dma_unmap_single(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007364 dma_unmap_addr(tx_buffer, dma),
7365 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00007366 DMA_TO_DEVICE);
7367
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007368 /* clear tx_buffer data */
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007369 dma_unmap_len_set(tx_buffer, len, 0);
7370
Alexander Duyckebe42d12011-08-26 07:45:09 +00007371 /* clear last DMA location and unmap remaining buffers */
7372 while (tx_desc != eop_desc) {
Alexander Duyck13fde972011-10-05 13:35:24 +00007373 tx_buffer++;
7374 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08007375 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00007376 if (unlikely(!i)) {
7377 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00007378 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00007379 tx_desc = IGB_TX_DESC(tx_ring, 0);
7380 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00007381
7382 /* unmap any remaining paged data */
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007383 if (dma_unmap_len(tx_buffer, len)) {
Alexander Duyckebe42d12011-08-26 07:45:09 +00007384 dma_unmap_page(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007385 dma_unmap_addr(tx_buffer, dma),
7386 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00007387 DMA_TO_DEVICE);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00007388 dma_unmap_len_set(tx_buffer, len, 0);
Alexander Duyckebe42d12011-08-26 07:45:09 +00007389 }
7390 }
7391
Alexander Duyckebe42d12011-08-26 07:45:09 +00007392 /* move us one more past the eop_desc for start of next pkt */
7393 tx_buffer++;
7394 tx_desc++;
7395 i++;
7396 if (unlikely(!i)) {
7397 i -= tx_ring->count;
7398 tx_buffer = tx_ring->tx_buffer_info;
7399 tx_desc = IGB_TX_DESC(tx_ring, 0);
7400 }
Alexander Duyckf4128782012-09-13 06:28:01 +00007401
7402 /* issue prefetch for next Tx descriptor */
7403 prefetch(tx_desc);
7404
7405 /* update budget accounting */
7406 budget--;
7407 } while (likely(budget));
Alexander Duyck0e014cb2008-12-26 01:33:18 -08007408
Eric Dumazetbdbc0632012-01-04 20:23:36 +00007409 netdev_tx_completed_queue(txring_txq(tx_ring),
7410 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00007411 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08007412 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00007413 u64_stats_update_begin(&tx_ring->tx_syncp);
7414 tx_ring->tx_stats.bytes += total_bytes;
7415 tx_ring->tx_stats.packets += total_packets;
7416 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00007417 q_vector->tx.total_bytes += total_bytes;
7418 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08007419
Alexander Duyck6d095fa2011-08-26 07:46:19 +00007420 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00007421 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00007422
Auke Kok9d5c8242008-01-24 02:22:38 -08007423		/* Detect a transmit hang in hardware; this serializes the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007424		 * check with the clearing of time_stamp and movement of i.
7425		 */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00007426 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckf4128782012-09-13 06:28:01 +00007427 if (tx_buffer->next_to_watch &&
Alexander Duyck8542db02011-08-26 07:44:43 +00007428 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00007429 (adapter->tx_timeout_factor * HZ)) &&
7430 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08007431
Auke Kok9d5c8242008-01-24 02:22:38 -08007432 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00007433 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08007434 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07007435 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08007436 " TDH <%x>\n"
7437 " TDT <%x>\n"
7438 " next_to_use <%x>\n"
7439 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08007440 "buffer_info[next_to_clean]\n"
7441 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00007442 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08007443 " jiffies <%lx>\n"
7444 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07007445 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00007446 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00007447 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08007448 tx_ring->next_to_use,
7449 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00007450 tx_buffer->time_stamp,
Alexander Duyckf4128782012-09-13 06:28:01 +00007451 tx_buffer->next_to_watch,
Auke Kok9d5c8242008-01-24 02:22:38 -08007452 jiffies,
Alexander Duyckf4128782012-09-13 06:28:01 +00007453 tx_buffer->next_to_watch->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00007454 netif_stop_subqueue(tx_ring->netdev,
7455 tx_ring->queue_index);
7456
7457 /* we are about to reset, no point in enabling stuff */
7458 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08007459 }
7460 }
Alexander Duyck13fde972011-10-05 13:35:24 +00007461
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00007462#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
Alexander Duyck13fde972011-10-05 13:35:24 +00007463 if (unlikely(total_packets &&
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007464 netif_carrier_ok(tx_ring->netdev) &&
7465 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00007466 /* Make sure that anybody stopping the queue after this
7467 * sees the new next_to_clean.
7468 */
7469 smp_mb();
7470 if (__netif_subqueue_stopped(tx_ring->netdev,
7471 tx_ring->queue_index) &&
7472 !(test_bit(__IGB_DOWN, &adapter->state))) {
7473 netif_wake_subqueue(tx_ring->netdev,
7474 tx_ring->queue_index);
7475
7476 u64_stats_update_begin(&tx_ring->tx_syncp);
7477 tx_ring->tx_stats.restart_queue++;
7478 u64_stats_update_end(&tx_ring->tx_syncp);
7479 }
7480 }
7481
7482 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08007483}
7484
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007485/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007486 * igb_reuse_rx_page - page flip buffer and store it back on the ring
7487 * @rx_ring: rx descriptor ring to store buffers on
7488 * @old_buff: donor buffer to have page reused
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007489 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007490 * Synchronizes page for reuse by the adapter
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007491 **/
7492static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7493 struct igb_rx_buffer *old_buff)
7494{
7495 struct igb_rx_buffer *new_buff;
7496 u16 nta = rx_ring->next_to_alloc;
7497
7498 new_buff = &rx_ring->rx_buffer_info[nta];
7499
7500 /* update, and store next to alloc */
7501 nta++;
7502 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7503
Alexander Duycke0142722017-02-06 18:27:26 -08007504 /* Transfer page from old buffer to new buffer.
7505 * Move each member individually to avoid possible store
7506 * forwarding stalls.
7507 */
7508 new_buff->dma = old_buff->dma;
7509 new_buff->page = old_buff->page;
7510 new_buff->page_offset = old_buff->page_offset;
7511 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007512}
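/* Recycling sketch: the donor buffer's page is parked at next_to_alloc,
 * which trails next_to_use, so igb_alloc_mapped_page() finds it again
 * on the next refill pass without going back to the page allocator.
 */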
7513
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007514static inline bool igb_page_is_reserved(struct page *page)
7515{
Michal Hocko2f064f32015-08-21 14:11:51 -07007516 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007517}
7518
Alexander Duycke0142722017-02-06 18:27:26 -08007519static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
Alexander Duyck74e238e2013-02-02 05:07:11 +00007520{
Alexander Duycke0142722017-02-06 18:27:26 -08007521 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7522 struct page *page = rx_buffer->page;
Alexander Duyckbd4171a2016-12-14 15:05:34 -08007523
Alexander Duyck74e238e2013-02-02 05:07:11 +00007524 /* avoid re-using remote pages */
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007525 if (unlikely(igb_page_is_reserved(page)))
Roman Gushchinbc16e472014-10-23 03:32:27 +00007526 return false;
7527
Alexander Duyck74e238e2013-02-02 05:07:11 +00007528#if (PAGE_SIZE < 8192)
7529 /* if we are only owner of page we can reuse it */
Alexander Duycke0142722017-02-06 18:27:26 -08007530 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
Alexander Duyck74e238e2013-02-02 05:07:11 +00007531 return false;
Alexander Duyck74e238e2013-02-02 05:07:11 +00007532#else
Alexander Duyck8649aae2017-02-06 18:27:03 -08007533#define IGB_LAST_OFFSET \
7534 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
Alexander Duyck74e238e2013-02-02 05:07:11 +00007535
Alexander Duyck8649aae2017-02-06 18:27:03 -08007536 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
Alexander Duyck74e238e2013-02-02 05:07:11 +00007537 return false;
Alexander Duyck74e238e2013-02-02 05:07:11 +00007538#endif
7539
Alexander Duyckbd4171a2016-12-14 15:05:34 -08007540 /* If we have drained the page fragment pool we need to update
7541 * the pagecnt_bias and page count so that we fully restock the
7542 * number of references the driver holds.
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007543 */
Alexander Duycke0142722017-02-06 18:27:26 -08007544 if (unlikely(!pagecnt_bias)) {
Alexander Duyckbd4171a2016-12-14 15:05:34 -08007545 page_ref_add(page, USHRT_MAX);
7546 rx_buffer->pagecnt_bias = USHRT_MAX;
7547 }
Alexander Duyck95dd44b2014-11-14 00:56:19 +00007548
Alexander Duyck74e238e2013-02-02 05:07:11 +00007549 return true;
7550}
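/* Reference accounting sketch: the driver front-loads USHRT_MAX page
 * references and pays them out through pagecnt_bias, one per fragment
 * handed to the stack.  page_ref_count() minus the bias therefore
 * reveals references the driver has not prepaid; more than one
 * outstanding owner means the page cannot be flipped and reused yet.
 */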
7551
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007552/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007553 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
7554 * @rx_ring: rx descriptor ring to transact packets on
7555 * @rx_buffer: buffer containing page to add
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007556 * @skb: sk_buff to place the data into
Alexander Duycke0142722017-02-06 18:27:26 -08007557 * @size: size of buffer to be added
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007558 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007559 * This function will add the data contained in rx_buffer->page to the skb.
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007560 **/
Alexander Duycke0142722017-02-06 18:27:26 -08007561static void igb_add_rx_frag(struct igb_ring *rx_ring,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007562 struct igb_rx_buffer *rx_buffer,
Alexander Duycke0142722017-02-06 18:27:26 -08007563 struct sk_buff *skb,
7564 unsigned int size)
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007565{
Alexander Duyck74e238e2013-02-02 05:07:11 +00007566#if (PAGE_SIZE < 8192)
Alexander Duyck8649aae2017-02-06 18:27:03 -08007567 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
Alexander Duyck74e238e2013-02-02 05:07:11 +00007568#else
Alexander Duycke3cdf682017-02-06 18:27:14 -08007569 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
7570 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
7571 SKB_DATA_ALIGN(size);
Alexander Duyck74e238e2013-02-02 05:07:11 +00007572#endif
Alexander Duycke0142722017-02-06 18:27:26 -08007573 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
7574 rx_buffer->page_offset, size, truesize);
7575#if (PAGE_SIZE < 8192)
7576 rx_buffer->page_offset ^= truesize;
7577#else
7578 rx_buffer->page_offset += truesize;
7579#endif
7580}
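/* Worked example of the offset flip: with order-0 4K pages, truesize
 * is 2048, so "page_offset ^= truesize" ping-pongs the receive buffer
 * between the two halves of the page; one half can be refilled while
 * the other is still referenced by an in-flight skb.  On larger pages
 * the offset walks forward instead, bounded by IGB_LAST_OFFSET in the
 * reuse check.
 */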
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007581
Alexander Duycke0142722017-02-06 18:27:26 -08007582static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
7583 struct igb_rx_buffer *rx_buffer,
7584 union e1000_adv_rx_desc *rx_desc,
7585 unsigned int size)
7586{
7587 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7588#if (PAGE_SIZE < 8192)
7589 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7590#else
7591 unsigned int truesize = SKB_DATA_ALIGN(size);
7592#endif
7593 unsigned int headlen;
7594 struct sk_buff *skb;
7595
7596 /* prefetch first cache line of first page */
7597 prefetch(va);
7598#if L1_CACHE_BYTES < 128
7599 prefetch(va + L1_CACHE_BYTES);
7600#endif
7601
7602 /* allocate a skb to store the frags */
7603 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
7604 if (unlikely(!skb))
7605 return NULL;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007606
Alexander Duyckf56e7bb2015-04-22 21:49:17 -07007607 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
7608 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
7609 va += IGB_TS_HDR_LEN;
7610 size -= IGB_TS_HDR_LEN;
7611 }
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007612
Alexander Duycke0142722017-02-06 18:27:26 -08007613 /* Determine available headroom for copy */
7614 headlen = size;
7615 if (headlen > IGB_RX_HDR_LEN)
7616 headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
Alexander Duyckf56e7bb2015-04-22 21:49:17 -07007617
7618 /* align pull length to size of long to optimize memcpy performance */
Alexander Duycke0142722017-02-06 18:27:26 -08007619 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
Alexander Duyckf56e7bb2015-04-22 21:49:17 -07007620
7621 /* update all of the pointers */
Alexander Duycke0142722017-02-06 18:27:26 -08007622 size -= headlen;
7623 if (size) {
7624 skb_add_rx_frag(skb, 0, rx_buffer->page,
7625 (va + headlen) - page_address(rx_buffer->page),
7626 size, truesize);
7627#if (PAGE_SIZE < 8192)
7628 rx_buffer->page_offset ^= truesize;
7629#else
7630 rx_buffer->page_offset += truesize;
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007631#endif
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007632 } else {
Alexander Duycke0142722017-02-06 18:27:26 -08007633 rx_buffer->pagecnt_bias++;
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007634 }
7635
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007636 return skb;
7637}
7638
Alexander Duyckb1bb2eb2017-02-06 18:27:36 -08007639static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
7640 struct igb_rx_buffer *rx_buffer,
7641 union e1000_adv_rx_desc *rx_desc,
7642 unsigned int size)
7643{
7644 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7645#if (PAGE_SIZE < 8192)
7646 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7647#else
7648 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
7649 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
7650#endif
7651 struct sk_buff *skb;
7652
7653 /* prefetch first cache line of first page */
7654 prefetch(va);
7655#if L1_CACHE_BYTES < 128
7656 prefetch(va + L1_CACHE_BYTES);
7657#endif
7658
Alexander Duyck3a1eb6d2017-02-15 09:15:59 -08007659 /* build an skb around the page buffer */
Alexander Duyckb1bb2eb2017-02-06 18:27:36 -08007660 skb = build_skb(va - IGB_SKB_PAD, truesize);
7661 if (unlikely(!skb))
7662 return NULL;
7663
7664 /* update pointers within the skb to store the data */
7665 skb_reserve(skb, IGB_SKB_PAD);
7666 __skb_put(skb, size);
7667
7668 /* pull timestamp out of packet data */
7669 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
7670 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
7671 __skb_pull(skb, IGB_TS_HDR_LEN);
7672 }
7673
7674 /* update buffer offset */
7675#if (PAGE_SIZE < 8192)
7676 rx_buffer->page_offset ^= truesize;
7677#else
7678 rx_buffer->page_offset += truesize;
7679#endif
7680
7681 return skb;
7682}
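/* Buffer layout assumed by the build_skb() path (a sketch):
 *
 *   va - IGB_SKB_PAD : headroom for the stack
 *   va .. va + size  : received frame data
 *   after the data   : struct skb_shared_info, inside truesize
 *
 * igb_construct_skb() above is the copy fallback: it allocates a small
 * skb, copies up to IGB_RX_HDR_LEN of headers out of the page, and
 * attaches any remainder as a page fragment.
 */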
7683
Alexander Duyckcd392f52011-08-26 07:43:59 +00007684static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007685 union e1000_adv_rx_desc *rx_desc,
7686 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08007687{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07007688 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08007689
Alexander Duyck294e7d72011-08-26 07:45:57 +00007690 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007691 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00007692 return;
7693
7694 /* Rx checksum disabled via ethtool */
7695 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08007696 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00007697
Auke Kok9d5c8242008-01-24 02:22:38 -08007698 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007699 if (igb_test_staterr(rx_desc,
7700 E1000_RXDEXT_STATERR_TCPE |
7701 E1000_RXDEXT_STATERR_IPE)) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007702		/* work around errata with SCTP packets where the TCPE (aka
Jesse Brandeburgb9473562009-04-27 22:36:13 +00007703		 * L4E) bit is set incorrectly on 64 byte (60 byte w/o CRC)
7704		 * packets; ignore it and let the stack verify the crc32c
7705		 */
Alexander Duyck866cff02011-08-26 07:45:36 +00007706 if (!((skb->len == 60) &&
7707 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00007708 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00007709 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00007710 u64_stats_update_end(&ring->rx_syncp);
7711 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007712 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08007713 return;
7714 }
7715 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007716 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
7717 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08007718 skb->ip_summed = CHECKSUM_UNNECESSARY;
7719
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007720 dev_dbg(ring->dev, "cksum success: bits %08X\n",
7721 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08007722}
7723
Alexander Duyck077887c2011-08-26 07:46:29 +00007724static inline void igb_rx_hash(struct igb_ring *ring,
7725 union e1000_adv_rx_desc *rx_desc,
7726 struct sk_buff *skb)
7727{
7728 if (ring->netdev->features & NETIF_F_RXHASH)
Tom Herbert42bdf082013-12-18 16:46:58 +00007729 skb_set_hash(skb,
7730 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
7731 PKT_HASH_TYPE_L3);
Alexander Duyck077887c2011-08-26 07:46:29 +00007732}
7733
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007734/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007735 * igb_is_non_eop - process handling of non-EOP buffers
7736 * @rx_ring: Rx ring being processed
7737 * @rx_desc: Rx descriptor for current buffer
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007739 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007740 * This function updates next to clean. If the buffer is an EOP buffer
7741 * this function exits returning false, otherwise it advances the
7742 * ring to the next buffer of the frame and returns true, indicating
7743 * that this is in fact a non-EOP buffer.
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007744 **/
7745static bool igb_is_non_eop(struct igb_ring *rx_ring,
7746 union e1000_adv_rx_desc *rx_desc)
7747{
7748 u32 ntc = rx_ring->next_to_clean + 1;
7749
7750 /* fetch, update, and store next to clean */
7751 ntc = (ntc < rx_ring->count) ? ntc : 0;
7752 rx_ring->next_to_clean = ntc;
7753
7754 prefetch(IGB_RX_DESC(rx_ring, ntc));
7755
7756 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
7757 return false;
7758
7759 return true;
7760}
7761
7762/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007763 * igb_cleanup_headers - Correct corrupted or empty headers
7764 * @rx_ring: rx descriptor ring packet is being transacted on
7765 * @rx_desc: pointer to the EOP Rx descriptor
7766 * @skb: pointer to current skb being fixed
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007767 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007768 * Address the case where we are pulling data in on pages only
7769 * and as such no data is present in the skb header.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007770 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007771 * In addition if skb is not at least 60 bytes we need to pad it so that
7772 * it is large enough to qualify as a valid Ethernet frame.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007773 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007774 * Returns true if an error was encountered and skb was freed.
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007775 **/
7776static bool igb_cleanup_headers(struct igb_ring *rx_ring,
7777 union e1000_adv_rx_desc *rx_desc,
7778 struct sk_buff *skb)
7779{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007780 if (unlikely((igb_test_staterr(rx_desc,
7781 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
7782 struct net_device *netdev = rx_ring->netdev;
7783 if (!(netdev->features & NETIF_F_RXALL)) {
7784 dev_kfree_skb_any(skb);
7785 return true;
7786 }
7787 }
7788
Alexander Duycka94d9e22014-12-03 08:17:39 -08007789 /* if eth_skb_pad returns an error the skb was freed */
7790 if (eth_skb_pad(skb))
7791 return true;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007792
7793 return false;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00007794}
7795
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007796/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007797 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
7798 * @rx_ring: rx descriptor ring packet is being transacted on
7799 * @rx_desc: pointer to the EOP Rx descriptor
7800 * @skb: pointer to current skb being populated
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007801 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007802 * This function checks the ring, descriptor, and packet information in
7803 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
7804 * other fields within the skb.
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007805 **/
7806static void igb_process_skb_fields(struct igb_ring *rx_ring,
7807 union e1000_adv_rx_desc *rx_desc,
7808 struct sk_buff *skb)
7809{
7810 struct net_device *dev = rx_ring->netdev;
7811
7812 igb_rx_hash(rx_ring, rx_desc, skb);
7813
7814 igb_rx_checksum(rx_ring, rx_desc, skb);
7815
Jakub Kicinski5499a962014-04-02 10:33:33 +00007816 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
7817 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
7818 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007819
Patrick McHardyf6469682013-04-19 02:04:27 +00007820 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007821 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
7822 u16 vid;
Carolyn Wyborny9005df32014-04-11 01:45:34 +00007823
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007824 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
7825 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
7826 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
7827 else
7828 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
7829
Patrick McHardy86a9bad2013-04-19 02:04:30 +00007830 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007831 }
7832
7833 skb_record_rx_queue(skb, rx_ring->queue_index);
7834
7835 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
7836}
7837
Alexander Duycke0142722017-02-06 18:27:26 -08007838static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
7839 const unsigned int size)
7840{
7841 struct igb_rx_buffer *rx_buffer;
7842
7843 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
7844 prefetchw(rx_buffer->page);
7845
7846 /* we are reusing so sync this buffer for CPU use */
7847 dma_sync_single_range_for_cpu(rx_ring->dev,
7848 rx_buffer->dma,
7849 rx_buffer->page_offset,
7850 size,
7851 DMA_FROM_DEVICE);
7852
7853 rx_buffer->pagecnt_bias--;
7854
7855 return rx_buffer;
7856}
7857
7858static void igb_put_rx_buffer(struct igb_ring *rx_ring,
7859 struct igb_rx_buffer *rx_buffer)
7860{
7861 if (igb_can_reuse_rx_page(rx_buffer)) {
7862 /* hand second half of page back to the ring */
7863 igb_reuse_rx_page(rx_ring, rx_buffer);
7864 } else {
7865 /* We are not reusing the buffer so unmap it and free
7866 * any references we are holding to it
7867 */
7868 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
7869 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
7870 IGB_RX_DMA_ATTR);
7871 __page_frag_cache_drain(rx_buffer->page,
7872 rx_buffer->pagecnt_bias);
7873 }
7874
7875 /* clear contents of rx_buffer */
7876 rx_buffer->page = NULL;
7877}
7878
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007879static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08007880{
Alexander Duyck0ba82992011-08-26 07:45:47 +00007881 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007882 struct sk_buff *skb = rx_ring->skb;
Auke Kok9d5c8242008-01-24 02:22:38 -08007883 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00007884 u16 cleaned_count = igb_desc_unused(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08007885
Eric W. Biederman57ba34c2014-03-14 18:00:06 -07007886 while (likely(total_packets < budget)) {
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007887 union e1000_adv_rx_desc *rx_desc;
Alexander Duycke0142722017-02-06 18:27:26 -08007888 struct igb_rx_buffer *rx_buffer;
7889 unsigned int size;
Auke Kok9d5c8242008-01-24 02:22:38 -08007890
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007891 /* return some buffers to hardware, one at a time is too slow */
7892 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
7893 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7894 cleaned_count = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07007895 }
7896
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007897 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
Alexander Duycke0142722017-02-06 18:27:26 -08007898 size = le16_to_cpu(rx_desc->wb.upper.length);
7899 if (!size)
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007900 break;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07007901
Alexander Duyck74e238e2013-02-02 05:07:11 +00007902 /* This memory barrier is needed to keep us from reading
7903 * any other fields out of the rx_desc until we know the
Alexander Duyck124b74c2014-12-11 15:02:28 -08007904 * descriptor has been written back
Alexander Duyck74e238e2013-02-02 05:07:11 +00007905 */
Alexander Duyck124b74c2014-12-11 15:02:28 -08007906 dma_rmb();
Alexander Duyck74e238e2013-02-02 05:07:11 +00007907
Alexander Duycke0142722017-02-06 18:27:26 -08007908 rx_buffer = igb_get_rx_buffer(rx_ring, size);
7909
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007910 /* retrieve a buffer from the ring */
Alexander Duycke0142722017-02-06 18:27:26 -08007911 if (skb)
7912 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
Alexander Duyckb1bb2eb2017-02-06 18:27:36 -08007913 else if (ring_uses_build_skb(rx_ring))
7914 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
Alexander Duycke0142722017-02-06 18:27:26 -08007915 else
7916 skb = igb_construct_skb(rx_ring, rx_buffer,
7917 rx_desc, size);
Alexander Duyck16eb8812011-08-26 07:43:54 +00007918
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007919 /* exit if we failed to retrieve a buffer */
Alexander Duycke0142722017-02-06 18:27:26 -08007920 if (!skb) {
7921 rx_ring->rx_stats.alloc_failed++;
7922 rx_buffer->pagecnt_bias++;
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007923 break;
Alexander Duycke0142722017-02-06 18:27:26 -08007924 }
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007925
Alexander Duycke0142722017-02-06 18:27:26 -08007926 igb_put_rx_buffer(rx_ring, rx_buffer);
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007927 cleaned_count++;
7928
7929 /* fetch next buffer in frame if non-eop */
7930 if (igb_is_non_eop(rx_ring, rx_desc))
7931 continue;
Alexander Duyck44390ca2011-08-26 07:43:38 +00007932
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007933 /* verify the packet layout is correct */
7934 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
7935 skb = NULL;
7936 continue;
Auke Kok9d5c8242008-01-24 02:22:38 -08007937 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007938
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007939 /* probably a little skewed due to removing CRC */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007940 total_bytes += skb->len;
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007941
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00007942 /* populate checksum, timestamp, VLAN, and protocol */
7943 igb_process_skb_fields(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00007944
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00007945 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08007946
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007947 /* reset skb pointer */
7948 skb = NULL;
7949
Alexander Duyck2e334ee2012-09-25 00:31:07 +00007950 /* update budget accounting */
7951 total_packets++;
Eric W. Biederman57ba34c2014-03-14 18:00:06 -07007952 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07007953
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007954 /* place incomplete frames back on ring for completion */
7955 rx_ring->skb = skb;
7956
Eric Dumazet12dcd862010-10-15 17:27:10 +00007957 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08007958 rx_ring->rx_stats.packets += total_packets;
7959 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00007960 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00007961 q_vector->rx.total_packets += total_packets;
7962 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00007963
7964 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00007965 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00007966
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07007967 return total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08007968}
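/* Rx poll loop in brief: refill descriptors in batches of
 * IGB_RX_BUFFER_WRITE, read the completed length (a zero length means
 * the descriptor has not been written back yet; dma_rmb() orders that
 * read against the rest of the descriptor), grow or create the skb,
 * recycle or unmap the page, and hand the frame to GRO only once the
 * EOP descriptor closes it.
 */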
7969
Alexander Duycke3cdf682017-02-06 18:27:14 -08007970static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
7971{
7972 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
7973}
7974
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007975static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
7976 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00007977{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00007978 struct page *page = bi->page;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007979 dma_addr_t dma;
Alexander Duyckc023cd82011-08-26 07:43:43 +00007980
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007981 /* since we are recycling buffers we should seldom need to alloc */
7982 if (likely(page))
Alexander Duyckc023cd82011-08-26 07:43:43 +00007983 return true;
7984
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007985 /* alloc new page for storage */
Alexander Duyck8649aae2017-02-06 18:27:03 -08007986 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007987 if (unlikely(!page)) {
7988 rx_ring->rx_stats.alloc_failed++;
7989 return false;
Alexander Duyckc023cd82011-08-26 07:43:43 +00007990 }
7991
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007992 /* map page for use */
Alexander Duyck8649aae2017-02-06 18:27:03 -08007993 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
7994 igb_rx_pg_size(rx_ring),
7995 DMA_FROM_DEVICE,
7996 IGB_RX_DMA_ATTR);
Alexander Duyckc023cd82011-08-26 07:43:43 +00007997
Jeff Kirsherb980ac12013-02-23 07:29:56 +00007998 /* if mapping failed free memory back to system since
Alexander Duyckcbc8e552012-09-25 00:31:02 +00007999 * there isn't much point in holding memory we can't use
8000 */
Alexander Duyckc023cd82011-08-26 07:43:43 +00008001 if (dma_mapping_error(rx_ring->dev, dma)) {
Alexander Duyck8649aae2017-02-06 18:27:03 -08008002 __free_pages(page, igb_rx_pg_order(rx_ring));
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008003
Alexander Duyckc023cd82011-08-26 07:43:43 +00008004 rx_ring->rx_stats.alloc_failed++;
8005 return false;
8006 }
8007
8008 bi->dma = dma;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008009 bi->page = page;
Alexander Duycke3cdf682017-02-06 18:27:14 -08008010 bi->page_offset = igb_rx_offset(rx_ring);
Alexander Duyckbd4171a2016-12-14 15:05:34 -08008011 bi->pagecnt_bias = 1;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00008012
Alexander Duyckc023cd82011-08-26 07:43:43 +00008013 return true;
8014}
8015
Auke Kok9d5c8242008-01-24 02:22:38 -08008016/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008017 * igb_alloc_rx_buffers - Replace used receive buffers
8018 * @rx_ring: rx descriptor ring to allocate new buffers on
8019 * @cleaned_count: number of buffers to replace
Auke Kok9d5c8242008-01-24 02:22:38 -08008019 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00008020void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08008021{
Auke Kok9d5c8242008-01-24 02:22:38 -08008022 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00008023 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00008024 u16 i = rx_ring->next_to_use;
Alexander Duyck8649aae2017-02-06 18:27:03 -08008025 u16 bufsz;
Auke Kok9d5c8242008-01-24 02:22:38 -08008026
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008027 /* nothing to do */
8028 if (!cleaned_count)
8029 return;
8030
Alexander Duyck601369062011-08-26 07:44:05 +00008031 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00008032 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00008033 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08008034
Alexander Duyck8649aae2017-02-06 18:27:03 -08008035 bufsz = igb_rx_bufsz(rx_ring);
8036
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008037 do {
Alexander Duyck1a1c2252012-09-25 00:30:52 +00008038 if (!igb_alloc_mapped_page(rx_ring, bi))
Alexander Duyckc023cd82011-08-26 07:43:43 +00008039 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08008040
Alexander Duyck5be59552016-12-14 15:05:30 -08008041 /* sync the buffer for use by the device */
8042 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
Alexander Duyck8649aae2017-02-06 18:27:03 -08008043 bi->page_offset, bufsz,
Alexander Duyck5be59552016-12-14 15:05:30 -08008044 DMA_FROM_DEVICE);
8045
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008046 /* Refresh the desc even if buffer_addrs didn't change
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008047 * because each write-back erases this info.
8048 */
Alexander Duyckf9d40f62013-04-17 20:41:04 +00008049 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
Auke Kok9d5c8242008-01-24 02:22:38 -08008050
Alexander Duyckc023cd82011-08-26 07:43:43 +00008051 rx_desc++;
8052 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08008053 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00008054 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00008055 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00008056 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00008057 i -= rx_ring->count;
8058 }
8059
Alexander Duyck7ec01162017-02-06 18:25:41 -08008060 /* clear the length for the next_to_use descriptor */
8061 rx_desc->wb.upper.length = 0;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008062
8063 cleaned_count--;
8064 } while (cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08008065
Alexander Duyckc023cd82011-08-26 07:43:43 +00008066 i += rx_ring->count;
8067
Auke Kok9d5c8242008-01-24 02:22:38 -08008068 if (rx_ring->next_to_use != i) {
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008069 /* record the next descriptor to use */
Auke Kok9d5c8242008-01-24 02:22:38 -08008070 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08008071
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008072 /* update next to alloc since we have filled the ring */
8073 rx_ring->next_to_alloc = i;
8074
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008075 /* Force memory writes to complete before letting h/w
Auke Kok9d5c8242008-01-24 02:22:38 -08008076 * know there are new descriptors to fetch. (Only
8077 * applicable for weak-ordered memory model archs,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00008078 * such as IA-64).
8079 */
Auke Kok9d5c8242008-01-24 02:22:38 -08008080 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00008081 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08008082 }
8083}
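/* Producer-side handoff: wmb() makes all descriptor writes globally
 * visible before the tail write lets the NIC DMA into the new buffers.
 * Each descriptor's wb.upper.length is cleared here so that a non-zero
 * length later serves as the "written back" signal in igb_clean_rx_irq().
 */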
8084
8085/**
8086 * igb_mii_ioctl - MII ioctl handler for PHY register access
8087 * @netdev: pointer to the net device structure
8088 * @ifr: interface request containing the MII data
8089 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
8090 **/
8091static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8092{
8093 struct igb_adapter *adapter = netdev_priv(netdev);
8094 struct mii_ioctl_data *data = if_mii(ifr);
8095
8096 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8097 return -EOPNOTSUPP;
8098
8099 switch (cmd) {
8100 case SIOCGMIIPHY:
8101 data->phy_id = adapter->hw.phy.addr;
8102 break;
8103 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08008104 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
Carolyn Wyborny9005df32014-04-11 01:45:34 +00008105 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08008106 return -EIO;
8107 break;
8108 case SIOCSMIIREG:
8109 default:
8110 return -EOPNOTSUPP;
8111 }
8112 return 0;
8113}
8114
8115/**
8116 * igb_ioctl - handle device-specific ioctls
8117 * @netdev: pointer to the net device structure
8118 * @ifr: interface request data
8119 * @cmd: ioctl command
8120 **/
8121static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8122{
8123 switch (cmd) {
8124 case SIOCGMIIPHY:
8125 case SIOCGMIIREG:
8126 case SIOCSMIIREG:
8127 return igb_mii_ioctl(netdev, ifr, cmd);
Jacob Keller6ab5f7b2014-01-11 07:20:06 +00008128 case SIOCGHWTSTAMP:
8129 return igb_ptp_get_ts_config(netdev, ifr);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00008130 case SIOCSHWTSTAMP:
Jacob Keller6ab5f7b2014-01-11 07:20:06 +00008131 return igb_ptp_set_ts_config(netdev, ifr);
Auke Kok9d5c8242008-01-24 02:22:38 -08008132 default:
8133 return -EOPNOTSUPP;
8134 }
8135}
8136
Todd Fujinaka94826482014-07-10 01:47:15 -07008137void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8138{
8139 struct igb_adapter *adapter = hw->back;
8140
8141 pci_read_config_word(adapter->pdev, reg, value);
8142}
8143
8144void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8145{
8146 struct igb_adapter *adapter = hw->back;
8147
8148 pci_write_config_word(adapter->pdev, reg, *value);
8149}
8150
Alexander Duyck009bc062009-07-23 18:08:35 +00008151s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8152{
8153 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00008154
Jiang Liu23d028c2012-08-20 13:32:20 -06008155 if (pcie_capability_read_word(adapter->pdev, reg, value))
Alexander Duyck009bc062009-07-23 18:08:35 +00008156 return -E1000_ERR_CONFIG;
8157
Alexander Duyck009bc062009-07-23 18:08:35 +00008158 return 0;
8159}
8160
8161s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8162{
8163 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00008164
Jiang Liu23d028c2012-08-20 13:32:20 -06008165 if (pcie_capability_write_word(adapter->pdev, reg, *value))
Alexander Duyck009bc062009-07-23 18:08:35 +00008166 return -E1000_ERR_CONFIG;
8167
Alexander Duyck009bc062009-07-23 18:08:35 +00008168 return 0;
8169}
8170
Michał Mirosławc8f44af2011-11-15 15:29:55 +00008171static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08008172{
8173 struct igb_adapter *adapter = netdev_priv(netdev);
8174 struct e1000_hw *hw = &adapter->hw;
8175 u32 ctrl, rctl;
Patrick McHardyf6469682013-04-19 02:04:27 +00008176 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08008177
Alexander Duyck5faf0302011-08-26 07:46:08 +00008178 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08008179 /* enable VLAN tag insert/strip */
8180 ctrl = rd32(E1000_CTRL);
8181 ctrl |= E1000_CTRL_VME;
8182 wr32(E1000_CTRL, ctrl);
8183
Alexander Duyck51466232009-10-27 23:47:35 +00008184 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08008185 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08008186 rctl &= ~E1000_RCTL_CFIEN;
8187 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08008188 } else {
8189 /* disable VLAN tag insert/strip */
8190 ctrl = rd32(E1000_CTRL);
8191 ctrl &= ~E1000_CTRL_VME;
8192 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08008193 }
8194
Corinna Vinschen030f9f52016-01-28 13:53:23 +01008195 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
Auke Kok9d5c8242008-01-24 02:22:38 -08008196}
8197
Patrick McHardy80d5c362013-04-19 02:04:28 +00008198static int igb_vlan_rx_add_vid(struct net_device *netdev,
8199 __be16 proto, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08008200{
8201 struct igb_adapter *adapter = netdev_priv(netdev);
8202 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008203 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08008204
Alexander Duyck51466232009-10-27 23:47:35 +00008205 /* add the filter since PF can receive vlans w/o entry in vlvf */
Alexander Duyck16903ca2016-01-06 23:11:18 -08008206 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8207 igb_vfta_set(hw, vid, pf_id, true, !!vid);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00008208
8209 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05008210
8211 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08008212}
8213
Patrick McHardy80d5c362013-04-19 02:04:28 +00008214static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8215 __be16 proto, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08008216{
8217 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008218 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08008219 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08008220
Alexander Duyck8b77c6b2016-01-06 23:11:04 -08008221 /* remove VID from filter table */
Alexander Duyck16903ca2016-01-06 23:11:18 -08008222 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8223 igb_vfta_set(hw, vid, pf_id, false, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00008224
8225 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05008226
8227 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08008228}
8229
8230static void igb_restore_vlan(struct igb_adapter *adapter)
8231{
Alexander Duyck5982a552016-01-06 23:10:54 -08008232 u16 vid = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08008233
Alexander Duyck5faf0302011-08-26 07:46:08 +00008234 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
Alexander Duyck5982a552016-01-06 23:10:54 -08008235 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
Alexander Duyck5faf0302011-08-26 07:46:08 +00008236
Alexander Duyck5982a552016-01-06 23:10:54 -08008237 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00008238 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08008239}
8240
David Decotigny14ad2512011-04-27 18:32:43 +00008241int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08008242{
Alexander Duyck090b1792009-10-27 23:51:55 +00008243 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08008244 struct e1000_mac_info *mac = &adapter->hw.mac;
8245
8246 mac->autoneg = 0;
8247
David Decotigny14ad2512011-04-27 18:32:43 +00008248 /* Make sure dplx is at most 1 bit and lsb of speed is not set
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008249 * for the switch() below to work
8250 */
David Decotigny14ad2512011-04-27 18:32:43 +00008251 if ((spd & 1) || (dplx & ~1))
8252 goto err_inval;
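	/* Worked example: SPEED_10/100/1000 are literally 10, 100 and
	 * 1000 and DUPLEX_HALF/FULL are 0 and 1, so every supported
	 * spd + dplx sum below (10, 11, 100, 101, 1001) is unambiguous
	 * once the check above has rejected odd speeds and multi-bit
	 * duplex values.
	 */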
8253
Akeem G. Abodunrinf502ef72013-04-05 16:49:06 +00008254	/* Fiber NICs only allow 1000 Mbps Full duplex
8255	 * and 100 Mbps Full duplex for 100BaseFX SFPs
8256	 */
8257 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8258 switch (spd + dplx) {
8259 case SPEED_10 + DUPLEX_HALF:
8260 case SPEED_10 + DUPLEX_FULL:
8261 case SPEED_100 + DUPLEX_HALF:
8262 goto err_inval;
8263 default:
8264 break;
8265 }
8266 }
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00008267
David Decotigny14ad2512011-04-27 18:32:43 +00008268 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08008269 case SPEED_10 + DUPLEX_HALF:
8270 mac->forced_speed_duplex = ADVERTISE_10_HALF;
8271 break;
8272 case SPEED_10 + DUPLEX_FULL:
8273 mac->forced_speed_duplex = ADVERTISE_10_FULL;
8274 break;
8275 case SPEED_100 + DUPLEX_HALF:
8276 mac->forced_speed_duplex = ADVERTISE_100_HALF;
8277 break;
8278 case SPEED_100 + DUPLEX_FULL:
8279 mac->forced_speed_duplex = ADVERTISE_100_FULL;
8280 break;
8281 case SPEED_1000 + DUPLEX_FULL:
8282 mac->autoneg = 1;
8283 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8284 break;
8285 case SPEED_1000 + DUPLEX_HALF: /* not supported */
8286 default:
David Decotigny14ad2512011-04-27 18:32:43 +00008287 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08008288 }
Jesse Brandeburg8376dad2012-07-26 02:31:19 +00008289
8290 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
8291 adapter->hw.phy.mdix = AUTO_ALL_MODES;
8292
Auke Kok9d5c8242008-01-24 02:22:38 -08008293 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00008294
8295err_inval:
8296 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8297 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08008298}
8299
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008300static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8301 bool runtime)
Auke Kok9d5c8242008-01-24 02:22:38 -08008302{
8303 struct net_device *netdev = pci_get_drvdata(pdev);
8304 struct igb_adapter *adapter = netdev_priv(netdev);
8305 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07008306 u32 ctrl, rctl, status;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008307 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
Auke Kok9d5c8242008-01-24 02:22:38 -08008308#ifdef CONFIG_PM
8309 int retval = 0;
8310#endif
8311
Todd Fujinaka94749332016-11-15 08:54:26 -08008312 rtnl_lock();
Auke Kok9d5c8242008-01-24 02:22:38 -08008313 netif_device_detach(netdev);
8314
Alexander Duycka88f10e2008-07-08 15:13:38 -07008315 if (netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008316 __igb_close(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07008317
Jacob Keller8646f7b2016-05-24 13:56:31 -07008318 igb_ptp_suspend(adapter);
8319
Alexander Duyck047e0032009-10-27 15:49:27 +00008320 igb_clear_interrupt_scheme(adapter);
Todd Fujinaka94749332016-11-15 08:54:26 -08008321 rtnl_unlock();
Auke Kok9d5c8242008-01-24 02:22:38 -08008322
8323#ifdef CONFIG_PM
8324 retval = pci_save_state(pdev);
8325 if (retval)
8326 return retval;
8327#endif
8328
8329 status = rd32(E1000_STATUS);
8330 if (status & E1000_STATUS_LU)
8331 wufc &= ~E1000_WUFC_LNKC;
8332
8333 if (wufc) {
8334 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00008335 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08008336
8337 /* turn on all-multi mode if wake on multicast is enabled */
8338 if (wufc & E1000_WUFC_MC) {
8339 rctl = rd32(E1000_RCTL);
8340 rctl |= E1000_RCTL_MPE;
8341 wr32(E1000_RCTL, rctl);
8342 }
8343
8344 ctrl = rd32(E1000_CTRL);
8345 /* advertise wake from D3Cold */
8346 #define E1000_CTRL_ADVD3WUC 0x00100000
8347 /* phy power management enable */
8348 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8349 ctrl |= E1000_CTRL_ADVD3WUC;
8350 wr32(E1000_CTRL, ctrl);
8351
Auke Kok9d5c8242008-01-24 02:22:38 -08008352 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00008353 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08008354
8355 wr32(E1000_WUC, E1000_WUC_PME_EN);
8356 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08008357 } else {
8358 wr32(E1000_WUC, 0);
8359 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08008360 }
8361
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008362 *enable_wake = wufc || adapter->en_mng_pt;
8363 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00008364 igb_power_down_link(adapter);
8365 else
8366 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08008367
8368 /* Release control of h/w to f/w. If f/w is AMT enabled, this
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008369 * would have already happened in close and is redundant.
8370 */
Auke Kok9d5c8242008-01-24 02:22:38 -08008371 igb_release_hw_control(adapter);
8372
8373 pci_disable_device(pdev);
8374
Auke Kok9d5c8242008-01-24 02:22:38 -08008375 return 0;
8376}
8377
Kim Tatt Chuahb90fa872017-03-27 08:44:35 +08008378static void igb_deliver_wake_packet(struct net_device *netdev)
8379{
8380 struct igb_adapter *adapter = netdev_priv(netdev);
8381 struct e1000_hw *hw = &adapter->hw;
8382 struct sk_buff *skb;
8383 u32 wupl;
8384
8385 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8386
8387 /* WUPM stores only the first 128 bytes of the wake packet.
8388 * Read the packet only if we have the whole thing.
8389 */
8390 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8391 return;
8392
8393 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8394 if (!skb)
8395 return;
8396
8397 skb_put(skb, wupl);
8398
8399 /* Ensure reads are 32-bit aligned */
8400 wupl = roundup(wupl, 4);
8401
8402 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8403
8404 skb->protocol = eth_type_trans(skb, netdev);
8405 netif_rx(skb);
8406}
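/* Note: the skb length was set from the original wupl before the
 * roundup, so the up-to-3 padding bytes copied from WUPM land in the
 * tailroom of the 128-byte allocation and are never seen by the stack.
 */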
8407
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008408static int __maybe_unused igb_suspend(struct device *dev)
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008409{
8410 int retval;
8411 bool wake;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008412 struct pci_dev *pdev = to_pci_dev(dev);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008413
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008414 retval = __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008415 if (retval)
8416 return retval;
8417
8418 if (wake) {
8419 pci_prepare_to_sleep(pdev);
8420 } else {
8421 pci_wake_from_d3(pdev, false);
8422 pci_set_power_state(pdev, PCI_D3hot);
8423 }
8424
8425 return 0;
8426}
8427
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008428static int __maybe_unused igb_resume(struct device *dev)
Auke Kok9d5c8242008-01-24 02:22:38 -08008429{
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008430 struct pci_dev *pdev = to_pci_dev(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08008431 struct net_device *netdev = pci_get_drvdata(pdev);
8432 struct igb_adapter *adapter = netdev_priv(netdev);
8433 struct e1000_hw *hw = &adapter->hw;
Kim Tatt Chuahb90fa872017-03-27 08:44:35 +08008434 u32 err, val;
Auke Kok9d5c8242008-01-24 02:22:38 -08008435
8436 pci_set_power_state(pdev, PCI_D0);
8437 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00008438 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09008439
Carolyn Wyborny17a402a2014-11-21 23:52:54 -08008440 if (!pci_device_is_present(pdev))
8441 return -ENODEV;
Alexander Duyckaed5dec2009-02-06 23:16:04 +00008442 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08008443 if (err) {
8444 dev_err(&pdev->dev,
8445 "igb: Cannot enable PCI device from suspend\n");
8446 return err;
8447 }
8448 pci_set_master(pdev);
8449
8450 pci_enable_wake(pdev, PCI_D3hot, 0);
8451 pci_enable_wake(pdev, PCI_D3cold, 0);
8452
Stefan Assmann53c7d062012-12-04 06:00:12 +00008453 if (igb_init_interrupt_scheme(adapter, true)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07008454 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8455 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08008456 }
8457
Auke Kok9d5c8242008-01-24 02:22:38 -08008458 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00008459
8460 /* let the f/w know that the h/w is now under the control of the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008461 * driver.
8462 */
Alexander Duycka8564f02009-02-06 23:21:10 +00008463 igb_get_hw_control(adapter);
8464
Kim Tatt Chuahb90fa872017-03-27 08:44:35 +08008465 val = rd32(E1000_WUS);
8466 if (val & WAKE_PKT_WUS)
8467 igb_deliver_wake_packet(netdev);
8468
Auke Kok9d5c8242008-01-24 02:22:38 -08008469 wr32(E1000_WUS, ~0);
8470
Todd Fujinaka94749332016-11-15 08:54:26 -08008471 rtnl_lock();
8472 if (!err && netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008473 err = __igb_open(netdev, true);
Auke Kok9d5c8242008-01-24 02:22:38 -08008474
Todd Fujinaka94749332016-11-15 08:54:26 -08008475 if (!err)
8476 netif_device_attach(netdev);
8477 rtnl_unlock();
8478
8479 return err;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008480}
8481
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008482static int __maybe_unused igb_runtime_idle(struct device *dev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008483{
8484 struct pci_dev *pdev = to_pci_dev(dev);
8485 struct net_device *netdev = pci_get_drvdata(pdev);
8486 struct igb_adapter *adapter = netdev_priv(netdev);
8487
8488 if (!igb_has_link(adapter))
8489 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8490
8491 return -EBUSY;
8492}
8493
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008494static int __maybe_unused igb_runtime_suspend(struct device *dev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008495{
8496 struct pci_dev *pdev = to_pci_dev(dev);
8497 int retval;
8498 bool wake;
8499
8500 retval = __igb_shutdown(pdev, &wake, 1);
8501 if (retval)
8502 return retval;
8503
8504 if (wake) {
8505 pci_prepare_to_sleep(pdev);
8506 } else {
8507 pci_wake_from_d3(pdev, false);
8508 pci_set_power_state(pdev, PCI_D3hot);
8509 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008510
Auke Kok9d5c8242008-01-24 02:22:38 -08008511 return 0;
8512}
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008513
Arnd Bergmann000ba1f2017-04-27 21:09:52 +02008514static int __maybe_unused igb_runtime_resume(struct device *dev)
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008515{
8516 return igb_resume(dev);
8517}
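/* The four PM callbacks above are not called directly; they are wired into
 * the driver's dev_pm_ops table (defined near the driver registration in
 * this file). A minimal sketch of that wiring with the standard helpers
 * from <linux/pm.h>:
 *
 *	static const struct dev_pm_ops igb_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
 *		SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
 *				   igb_runtime_idle)
 *	};
 *
 * With __maybe_unused on the callbacks, no #ifdef CONFIG_PM guards are
 * needed around the function bodies themselves.
 */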
Auke Kok9d5c8242008-01-24 02:22:38 -08008518
8519static void igb_shutdown(struct pci_dev *pdev)
8520{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008521 bool wake;
8522
Yan, Zheng749ab2c2012-01-04 20:23:37 +00008523 __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00008524
8525 if (system_state == SYSTEM_POWER_OFF) {
8526 pci_wake_from_d3(pdev, wake);
8527 pci_set_power_state(pdev, PCI_D3hot);
8528 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008529}
8530
Greg Rosefa44f2f2013-01-17 01:03:06 -08008531#ifdef CONFIG_PCI_IOV
8532static int igb_sriov_reinit(struct pci_dev *dev)
8533{
8534 struct net_device *netdev = pci_get_drvdata(dev);
8535 struct igb_adapter *adapter = netdev_priv(netdev);
8536 struct pci_dev *pdev = adapter->pdev;
8537
8538 rtnl_lock();
8539
8540 if (netif_running(netdev))
8541 igb_close(netdev);
Stefan Assmann76252722014-07-10 03:29:39 -07008542 else
8543 igb_reset(adapter);
Greg Rosefa44f2f2013-01-17 01:03:06 -08008544
8545 igb_clear_interrupt_scheme(adapter);
8546
8547 igb_init_queue_configuration(adapter);
8548
8549 if (igb_init_interrupt_scheme(adapter, true)) {
Vasily Averinf468adc2015-07-07 18:53:45 +03008550 rtnl_unlock();
Greg Rosefa44f2f2013-01-17 01:03:06 -08008551 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8552 return -ENOMEM;
8553 }
8554
8555 if (netif_running(netdev))
8556 igb_open(netdev);
8557
8558 rtnl_unlock();
8559
8560 return 0;
8561}
8562
8563static int igb_pci_disable_sriov(struct pci_dev *dev)
8564{
8565 int err = igb_disable_sriov(dev);
8566
8567 if (!err)
8568 err = igb_sriov_reinit(dev);
8569
8570 return err;
8571}
8572
8573static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8574{
8575 int err = igb_enable_sriov(dev, num_vfs);
8576
8577 if (err)
8578 goto out;
8579
8580 err = igb_sriov_reinit(dev);
8581 if (!err)
8582 return num_vfs;
8583
8584out:
8585 return err;
8586}
8587
8588#endif
8589static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
8590{
8591#ifdef CONFIG_PCI_IOV
8592 if (num_vfs == 0)
8593 return igb_pci_disable_sriov(dev);
8594 else
8595 return igb_pci_enable_sriov(dev, num_vfs);
8596#endif
8597 return 0;
8598}
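/* igb_pci_sriov_configure() is exposed through the .sriov_configure hook of
 * struct pci_driver; the PCI core calls it when userspace writes the sysfs
 * sriov_numvfs attribute. Illustrative shell usage (the PCI address is an
 * example):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs	# create 4 VFs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs	# remove them
 */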
8599
Auke Kok9d5c8242008-01-24 02:22:38 -08008600#ifdef CONFIG_NET_POLL_CONTROLLER
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008601/* Polling 'interrupt' - used by things like netconsole to send skbs
Auke Kok9d5c8242008-01-24 02:22:38 -08008602 * without having to re-enable interrupts. It's not called while
8603 * the interrupt routine is executing.
8604 */
8605static void igb_netpoll(struct net_device *netdev)
8606{
8607 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00008608 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00008609 struct igb_q_vector *q_vector;
Auke Kok9d5c8242008-01-24 02:22:38 -08008610 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08008611
Alexander Duyck047e0032009-10-27 15:49:27 +00008612 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00008613 q_vector = adapter->q_vector[i];
Carolyn Wybornycd14ef52013-12-10 07:58:34 +00008614 if (adapter->flags & IGB_FLAG_HAS_MSIX)
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00008615 wr32(E1000_EIMC, q_vector->eims_value);
8616 else
8617 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00008618 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00008619 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008620}
8621#endif /* CONFIG_NET_POLL_CONTROLLER */
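/* igb_netpoll() backs the ndo_poll_controller hook; netconsole is the usual
 * consumer. A hedged example of loading netconsole over this device (ports,
 * addresses, and MAC are placeholders):
 *
 *	modprobe netconsole \
 *	    netconsole=6665@192.168.1.2/eth0,6666@192.168.1.1/00:11:22:33:44:55
 */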
8622
8623/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008624 * igb_io_error_detected - called when PCI error is detected
8625 * @pdev: Pointer to PCI device
8626 * @state: The current pci connection state
Auke Kok9d5c8242008-01-24 02:22:38 -08008627 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008628 * This function is called after a PCI bus error affecting
8629 * this device has been detected.
8630 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08008631static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
8632 pci_channel_state_t state)
8633{
8634 struct net_device *netdev = pci_get_drvdata(pdev);
8635 struct igb_adapter *adapter = netdev_priv(netdev);
8636
8637 netif_device_detach(netdev);
8638
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00008639 if (state == pci_channel_io_perm_failure)
8640 return PCI_ERS_RESULT_DISCONNECT;
8641
Auke Kok9d5c8242008-01-24 02:22:38 -08008642 if (netif_running(netdev))
8643 igb_down(adapter);
8644 pci_disable_device(pdev);
8645
8646	/* Request a slot reset. */
8647 return PCI_ERS_RESULT_NEED_RESET;
8648}
8649
8650/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008651 * igb_io_slot_reset - called after the pci bus has been reset.
8652 * @pdev: Pointer to PCI device
Auke Kok9d5c8242008-01-24 02:22:38 -08008653 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008654 * Restart the card from scratch, as if from a cold-boot. Implementation
8655 * resembles the first-half of the igb_resume routine.
8656 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08008657static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
8658{
8659 struct net_device *netdev = pci_get_drvdata(pdev);
8660 struct igb_adapter *adapter = netdev_priv(netdev);
8661 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08008662 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09008663 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08008664
Alexander Duyckaed5dec2009-02-06 23:16:04 +00008665 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08008666 dev_err(&pdev->dev,
8667 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08008668 result = PCI_ERS_RESULT_DISCONNECT;
8669 } else {
8670 pci_set_master(pdev);
8671 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00008672 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08008673
8674 pci_enable_wake(pdev, PCI_D3hot, 0);
8675 pci_enable_wake(pdev, PCI_D3cold, 0);
8676
Guilherme G Piccoli69b97cf2016-11-10 16:46:43 -02008677		/* In case of a PCI error, the adapter loses its HW address,
8678		 * so we should re-assign it here.
8679 */
8680 hw->hw_addr = adapter->io_addr;
8681
Alexander Duyck40a914f2008-11-27 00:24:37 -08008682 igb_reset(adapter);
8683 wr32(E1000_WUS, ~0);
8684 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08008685 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008686
Jeff Kirsherea943d42008-12-11 20:34:19 -08008687 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8688 if (err) {
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008689 dev_err(&pdev->dev,
8690			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
8691 err);
Jeff Kirsherea943d42008-12-11 20:34:19 -08008692 /* non-fatal, continue */
8693 }
Auke Kok9d5c8242008-01-24 02:22:38 -08008694
Alexander Duyck40a914f2008-11-27 00:24:37 -08008695 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08008696}
8697
8698/**
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008699 * igb_io_resume - called when traffic can start flowing again.
8700 * @pdev: Pointer to PCI device
Auke Kok9d5c8242008-01-24 02:22:38 -08008701 *
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008702 * This callback is called when the error recovery driver tells us that
8703 * it's OK to resume normal operation. Implementation resembles the
8704 * second-half of the igb_resume routine.
Auke Kok9d5c8242008-01-24 02:22:38 -08008705 */
8706static void igb_io_resume(struct pci_dev *pdev)
8707{
8708 struct net_device *netdev = pci_get_drvdata(pdev);
8709 struct igb_adapter *adapter = netdev_priv(netdev);
8710
Auke Kok9d5c8242008-01-24 02:22:38 -08008711 if (netif_running(netdev)) {
8712 if (igb_up(adapter)) {
8713 dev_err(&pdev->dev, "igb_up failed after reset\n");
8714 return;
8715 }
8716 }
8717
8718 netif_device_attach(netdev);
8719
8720 /* let the f/w know that the h/w is now under the control of the
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008721 * driver.
8722 */
Auke Kok9d5c8242008-01-24 02:22:38 -08008723 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08008724}
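/* The three igb_io_* callbacks above implement the PCI AER recovery sequence
 * (error_detected -> slot_reset -> resume). They reach the PCI core through
 * the driver's pci_error_handlers table (defined near the driver
 * registration in this file); a minimal sketch:
 *
 *	static const struct pci_error_handlers igb_err_handler = {
 *		.error_detected = igb_io_error_detected,
 *		.slot_reset = igb_io_slot_reset,
 *		.resume = igb_io_resume,
 *	};
 */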
8725
Yury Kylulin83c21332017-03-07 11:20:25 +03008726/**
8727 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
8728 * @adapter: Pointer to adapter structure
8729 * @index: Index of the RAR entry which needs to be synced with the MAC table
8730 **/
8731static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
Alexander Duyck26ad9172009-10-05 06:32:49 +00008732{
Alexander Duyck26ad9172009-10-05 06:32:49 +00008733 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc3278582016-01-06 23:10:23 -08008734 u32 rar_low, rar_high;
Yury Kylulin83c21332017-03-07 11:20:25 +03008735 u8 *addr = adapter->mac_table[index].addr;
Alexander Duyck26ad9172009-10-05 06:32:49 +00008736
Alexander Duyck415cd2a2016-03-18 16:06:53 -07008737	/* HW expects these to be in network order when they are plugged
8738	 * into the registers, which are little endian. To guarantee that
8739	 * ordering we do an leXX_to_cpup here, so the values are ready for
8740	 * the byteswap that occurs with writel.
Alexander Duyck26ad9172009-10-05 06:32:49 +00008741 */
Alexander Duyck415cd2a2016-03-18 16:06:53 -07008742 rar_low = le32_to_cpup((__le32 *)(addr));
8743 rar_high = le16_to_cpup((__le16 *)(addr + 4));
Alexander Duyck26ad9172009-10-05 06:32:49 +00008744
8745 /* Indicate to hardware the Address is Valid. */
Yury Kylulin83c21332017-03-07 11:20:25 +03008746 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
Corinna Vinschen177132d2017-04-10 10:58:14 +02008747 if (is_valid_ether_addr(addr))
8748 rar_high |= E1000_RAH_AV;
Alexander Duyck26ad9172009-10-05 06:32:49 +00008749
Yury Kylulin83c21332017-03-07 11:20:25 +03008750 if (hw->mac.type == e1000_82575)
8751 rar_high |= E1000_RAH_POOL_1 *
8752 adapter->mac_table[index].queue;
8753 else
8754 rar_high |= E1000_RAH_POOL_1 <<
8755 adapter->mac_table[index].queue;
8756 }
Alexander Duyck26ad9172009-10-05 06:32:49 +00008757
8758 wr32(E1000_RAL(index), rar_low);
8759 wrfl();
8760 wr32(E1000_RAH(index), rar_high);
8761 wrfl();
8762}
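/* Worked example of the RAL/RAH packing above, assuming MAC address
 * 00:11:22:33:44:55 in adapter->mac_table[index].addr:
 *
 *	rar_low  = le32_to_cpup(addr)     = 0x33221100
 *	rar_high = le16_to_cpup(addr + 4) = 0x00005544
 *
 * (before the AV and pool bits are OR'd into rar_high). The CPU-order
 * values are then written with wr32(), where writel() applies whatever
 * byteswap the architecture needs.
 */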
8763
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008764static int igb_set_vf_mac(struct igb_adapter *adapter,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008765 int vf, unsigned char *mac_addr)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008766{
8767 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00008768	/* VF MAC addresses start at the end of the receive address array and
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008769	 * move towards the first entry, so a collision should not be possible
8770 */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00008771 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Yury Kylulin83c21332017-03-07 11:20:25 +03008772 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008773
Yury Kylulin83c21332017-03-07 11:20:25 +03008774 ether_addr_copy(vf_mac_addr, mac_addr);
8775 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
8776 adapter->mac_table[rar_entry].queue = vf;
8777 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
8778 igb_rar_set_index(adapter, rar_entry);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008779
8780 return 0;
8781}
8782
Williams, Mitch A8151d292010-02-10 01:44:24 +00008783static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
8784{
8785 struct igb_adapter *adapter = netdev_priv(netdev);
Corinna Vinschen177132d2017-04-10 10:58:14 +02008786
8787 if (vf >= adapter->vfs_allocated_count)
Williams, Mitch A8151d292010-02-10 01:44:24 +00008788 return -EINVAL;
Corinna Vinschen177132d2017-04-10 10:58:14 +02008789
8790	/* Setting the VF MAC to 0 clears the IGB_VF_FLAG_PF_SET_MAC
8791	 * flag and allows the MAC to be overwritten via the VF netdev.
8792	 * This gives libvirt a way to restore the original MAC after
8793	 * unbinding vfio-pci and reloading igbvf after shutting down
8794	 * a VM.
8795 */
8796 if (is_zero_ether_addr(mac)) {
8797 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
8798 dev_info(&adapter->pdev->dev,
8799 "remove administratively set MAC on VF %d\n",
8800 vf);
8801 } else if (is_valid_ether_addr(mac)) {
8802 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
8803 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
8804 mac, vf);
8805 dev_info(&adapter->pdev->dev,
8806			 "Reload the VF driver to make this change effective.\n");
8807 /* Generate additional warning if PF is down */
8808 if (test_bit(__IGB_DOWN, &adapter->state)) {
8809 dev_warn(&adapter->pdev->dev,
8810 "The VF MAC address has been set, but the PF device is not up.\n");
8811 dev_warn(&adapter->pdev->dev,
8812 "Bring the PF device up before attempting to use the VF device.\n");
8813 }
8814 } else {
8815 return -EINVAL;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008816 }
8817 return igb_set_vf_mac(adapter, vf, mac);
8818}
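/* igb_ndo_set_vf_mac() backs the ndo_set_vf_mac netdev op. Typical
 * administration via iproute2 (interface and VF number are placeholders):
 *
 *	ip link set eth0 vf 0 mac 52:54:00:12:34:56	# pin an administratively set MAC
 *	ip link set eth0 vf 0 mac 00:00:00:00:00:00	# revert control to the VF
 */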
8819
Lior Levy17dc5662011-02-08 02:28:46 +00008820static int igb_link_mbps(int internal_link_speed)
8821{
8822 switch (internal_link_speed) {
8823 case SPEED_100:
8824 return 100;
8825 case SPEED_1000:
8826 return 1000;
8827 default:
8828 return 0;
8829 }
8830}
8831
8832static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
8833 int link_speed)
8834{
8835 int rf_dec, rf_int;
8836 u32 bcnrc_val;
8837
8838 if (tx_rate != 0) {
8839 /* Calculate the rate factor values to set */
8840 rf_int = link_speed / tx_rate;
8841 rf_dec = (link_speed - (rf_int * tx_rate));
Jacob Kellera51d8c22016-04-13 16:08:28 -07008842 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008843 tx_rate;
Lior Levy17dc5662011-02-08 02:28:46 +00008844
8845 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008846 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
8847 E1000_RTTBCNRC_RF_INT_MASK);
Lior Levy17dc5662011-02-08 02:28:46 +00008848 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
8849 } else {
8850 bcnrc_val = 0;
8851 }
8852
8853 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008854 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
Lior Levyf00b0da2011-06-04 06:05:03 +00008855 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
8856 */
8857 wr32(E1000_RTTBCNRM, 0x14);
Lior Levy17dc5662011-02-08 02:28:46 +00008858 wr32(E1000_RTTBCNRC, bcnrc_val);
8859}
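/* Worked example of the rate-factor math above (assumed values): with
 * link_speed = 1000 Mbps and tx_rate = 300 Mbps,
 *
 *	rf_int = 1000 / 300                       = 3
 *	rf_dec = (1000 - 3 * 300) * BIT(14) / 300 = 5461
 *
 * so RTTBCNRC encodes 3 + 5461/16384 ~= 3.333, the link-to-rate ratio in
 * 2.14 fixed point (E1000_RTTBCNRC_RF_INT_SHIFT is 14).
 */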
8860
8861static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
8862{
8863 int actual_link_speed, i;
8864 bool reset_rate = false;
8865
8866 /* VF TX rate limit was not set or not supported */
8867 if ((adapter->vf_rate_link_speed == 0) ||
8868 (adapter->hw.mac.type != e1000_82576))
8869 return;
8870
8871 actual_link_speed = igb_link_mbps(adapter->link_speed);
8872 if (actual_link_speed != adapter->vf_rate_link_speed) {
8873 reset_rate = true;
8874 adapter->vf_rate_link_speed = 0;
8875 dev_info(&adapter->pdev->dev,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008876 "Link speed has been changed. VF Transmit rate is disabled\n");
Lior Levy17dc5662011-02-08 02:28:46 +00008877 }
8878
8879 for (i = 0; i < adapter->vfs_allocated_count; i++) {
8880 if (reset_rate)
8881 adapter->vf_data[i].tx_rate = 0;
8882
8883 igb_set_vf_rate_limit(&adapter->hw, i,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00008884 adapter->vf_data[i].tx_rate,
8885 actual_link_speed);
Lior Levy17dc5662011-02-08 02:28:46 +00008886 }
8887}
8888
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008889static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
8890 int min_tx_rate, int max_tx_rate)
Williams, Mitch A8151d292010-02-10 01:44:24 +00008891{
Lior Levy17dc5662011-02-08 02:28:46 +00008892 struct igb_adapter *adapter = netdev_priv(netdev);
8893 struct e1000_hw *hw = &adapter->hw;
8894 int actual_link_speed;
8895
8896 if (hw->mac.type != e1000_82576)
8897 return -EOPNOTSUPP;
8898
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008899 if (min_tx_rate)
8900 return -EINVAL;
8901
Lior Levy17dc5662011-02-08 02:28:46 +00008902 actual_link_speed = igb_link_mbps(adapter->link_speed);
8903 if ((vf >= adapter->vfs_allocated_count) ||
8904 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008905 (max_tx_rate < 0) ||
8906 (max_tx_rate > actual_link_speed))
Lior Levy17dc5662011-02-08 02:28:46 +00008907 return -EINVAL;
8908
8909 adapter->vf_rate_link_speed = actual_link_speed;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008910 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
8911 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
Lior Levy17dc5662011-02-08 02:28:46 +00008912
8913 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008914}
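/* igb_ndo_set_vf_bw() services the ndo_set_vf_rate netdev op; only the
 * maximum rate is honoured (a nonzero min_tx_rate returns -EINVAL, and
 * only the 82576 supports it). iproute2 example (names are placeholders):
 *
 *	ip link set eth0 vf 0 max_tx_rate 500
 */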
8915
Lior Levy70ea4782013-03-03 20:27:48 +00008916static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
8917 bool setting)
8918{
8919 struct igb_adapter *adapter = netdev_priv(netdev);
8920 struct e1000_hw *hw = &adapter->hw;
8921 u32 reg_val, reg_offset;
8922
8923 if (!adapter->vfs_allocated_count)
8924 return -EOPNOTSUPP;
8925
8926 if (vf >= adapter->vfs_allocated_count)
8927 return -EINVAL;
8928
8929 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
8930 reg_val = rd32(reg_offset);
8931 if (setting)
Jacob Kellera51d8c22016-04-13 16:08:28 -07008932 reg_val |= (BIT(vf) |
8933 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
Lior Levy70ea4782013-03-03 20:27:48 +00008934 else
Jacob Kellera51d8c22016-04-13 16:08:28 -07008935 reg_val &= ~(BIT(vf) |
8936 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
Lior Levy70ea4782013-03-03 20:27:48 +00008937 wr32(reg_offset, reg_val);
8938
8939 adapter->vf_data[vf].spoofchk_enabled = setting;
Todd Fujinaka23d87822014-06-04 07:12:15 +00008940 return 0;
Lior Levy70ea4782013-03-03 20:27:48 +00008941}
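/* igb_ndo_set_vf_spoofchk() backs the ndo_set_vf_spoofchk netdev op.
 * iproute2 example (interface and VF number are placeholders):
 *
 *	ip link set eth0 vf 0 spoofchk off
 */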
8942
Corinna Vinschen1b8b0622018-01-17 11:53:39 +01008943static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
8944{
8945 struct igb_adapter *adapter = netdev_priv(netdev);
8946
8947 if (vf >= adapter->vfs_allocated_count)
8948 return -EINVAL;
8949 if (adapter->vf_data[vf].trusted == setting)
8950 return 0;
8951
8952 adapter->vf_data[vf].trusted = setting;
8953
8954 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
8955 vf, setting ? "" : "not ");
8956 return 0;
8957}
8958
Williams, Mitch A8151d292010-02-10 01:44:24 +00008959static int igb_ndo_get_vf_config(struct net_device *netdev,
8960 int vf, struct ifla_vf_info *ivi)
8961{
8962	struct igb_adapter *adapter = netdev_priv(netdev);

8963 if (vf >= adapter->vfs_allocated_count)
8964 return -EINVAL;
8965 ivi->vf = vf;
8966 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04008967 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
8968 ivi->min_tx_rate = 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008969 ivi->vlan = adapter->vf_data[vf].pf_vlan;
8970 ivi->qos = adapter->vf_data[vf].pf_qos;
Lior Levy70ea4782013-03-03 20:27:48 +00008971 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
Corinna Vinschen1b8b0622018-01-17 11:53:39 +01008972 ivi->trusted = adapter->vf_data[vf].trusted;
Williams, Mitch A8151d292010-02-10 01:44:24 +00008973 return 0;
8974}
8975
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008976static void igb_vmm_control(struct igb_adapter *adapter)
8977{
8978 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00008979 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008980
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008981 switch (hw->mac.type) {
8982 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00008983 case e1000_i210:
8984 case e1000_i211:
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00008985 case e1000_i354:
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008986 default:
8987 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08008988 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008989 case e1000_82576:
8990 /* notify HW that the MAC is adding vlan tags */
8991 reg = rd32(E1000_DTXCTL);
8992 reg |= E1000_DTXCTL_VLAN_ADDED;
8993 wr32(E1000_DTXCTL, reg);
Carolyn Wybornyb26141d2014-04-17 04:10:13 +00008994 /* Fall through */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00008995 case e1000_82580:
8996 /* enable replication vlan tag stripping */
8997 reg = rd32(E1000_RPLOLR);
8998 reg |= E1000_RPLOLR_STRVLAN;
8999 wr32(E1000_RPLOLR, reg);
Carolyn Wybornyb26141d2014-04-17 04:10:13 +00009000 /* Fall through */
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00009001 case e1000_i350:
9002 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00009003 break;
9004 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00009005
Alexander Duyckd4960302009-10-27 15:53:45 +00009006 if (adapter->vfs_allocated_count) {
9007 igb_vmdq_set_loopback_pf(hw, true);
9008 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00009009 igb_vmdq_set_anti_spoofing_pf(hw, true,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009010 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00009011 } else {
9012 igb_vmdq_set_loopback_pf(hw, false);
9013 igb_vmdq_set_replication_pf(hw, false);
9014 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08009015}
9016
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009017static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9018{
9019 struct e1000_hw *hw = &adapter->hw;
9020 u32 dmac_thr;
9021 u16 hwm;
9022
9023 if (hw->mac.type > e1000_82580) {
9024 if (adapter->flags & IGB_FLAG_DMAC) {
9025 u32 reg;
9026
9027 /* force threshold to 0. */
9028 wr32(E1000_DMCTXTH, 0);
9029
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009030 /* DMA Coalescing high water mark needs to be greater
Matthew Vicke8c626e2011-11-17 08:33:12 +00009031 * than the Rx threshold. Set hwm to PBA - max frame
9032 * size in 16B units, capping it at PBA - 6KB.
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009033 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08009034 hwm = 64 * (pba - 6);
Matthew Vicke8c626e2011-11-17 08:33:12 +00009035 reg = rd32(E1000_FCRTC);
9036 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9037 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9038 & E1000_FCRTC_RTH_COAL_MASK);
9039 wr32(E1000_FCRTC, reg);
9040
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009041 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
Matthew Vicke8c626e2011-11-17 08:33:12 +00009042 * frame size, capping it at PBA - 10KB.
9043 */
Alexander Duyck45693bc2016-01-06 23:10:39 -08009044 dmac_thr = pba - 10;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009045 reg = rd32(E1000_DMACR);
9046 reg &= ~E1000_DMACR_DMACTHR_MASK;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009047 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9048 & E1000_DMACR_DMACTHR_MASK);
9049
9050			/* transition to L0s or L1 if available */
9051 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9052
9053			/* watchdog timer = +-1000 usec in 32 usec intervals */
9054 reg |= (1000 >> 5);
Matthew Vick0c02dd92012-04-14 05:20:32 +00009055
9056 /* Disable BMC-to-OS Watchdog Enable */
Carolyn Wybornyceb5f132013-04-18 22:21:30 +00009057 if (hw->mac.type != e1000_i354)
9058 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9059
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009060 wr32(E1000_DMACR, reg);
9061
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009062			/* no lower threshold to disable
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009063			 * coalescing (smart FIFO); UTRESH = 0
9064 */
9065 wr32(E1000_DMCRTRH, 0);
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009066
9067 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9068
9069 wr32(E1000_DMCTLX, reg);
9070
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009071 /* free space in tx packet buffer to wake from
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009072 * DMA coal
9073 */
9074 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9075 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9076
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009077 /* make low power state decision controlled
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009078 * by DMA coal
9079 */
9080 reg = rd32(E1000_PCIEMISC);
9081 reg &= ~E1000_PCIEMISC_LX_DECISION;
9082 wr32(E1000_PCIEMISC, reg);
9083 } /* endif adapter->dmac is not disabled */
9084 } else if (hw->mac.type == e1000_82580) {
9085 u32 reg = rd32(E1000_PCIEMISC);
Carolyn Wyborny9005df32014-04-11 01:45:34 +00009086
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00009087 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9088 wr32(E1000_DMACR, 0);
9089 }
9090}
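/* Unit check for the watermark math above (pba value assumed for
 * illustration): pba is in KB while the FCRTC watermark counts 16-byte
 * units, so (pba - 6) KB becomes 64 * (pba - 6) units. For pba = 34,
 * hwm = 64 * 28 = 1792 units, i.e. a 28 KB high water mark sitting 6 KB
 * below the packet buffer size.
 */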
9091
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009092/**
9093 * igb_read_i2c_byte - Reads an 8-bit value over I2C
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009094 * @hw: pointer to hardware structure
9095 * @byte_offset: byte offset to read
9096 * @dev_addr: device address
9097 * @data: value read
9098 *
9099 * Performs byte read operation over I2C interface at
9100 * a specified device address.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009101 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009102s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009103 u8 dev_addr, u8 *data)
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009104{
9105 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00009106 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009107 s32 status;
9108 u16 swfw_mask = 0;
9109
9110 if (!this_client)
9111 return E1000_ERR_I2C;
9112
9113 swfw_mask = E1000_SWFW_PHY0_SM;
9114
Todd Fujinaka23d87822014-06-04 07:12:15 +00009115 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009116 return E1000_ERR_SWFW_SYNC;
9117
9118 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9119 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9120
9121	if (status < 0)
9122		return E1000_ERR_I2C;
9123
9124	*data = status;
Todd Fujinaka23d87822014-06-04 07:12:15 +00009125	return 0;
9127}
9128
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009129/**
9130 * igb_write_i2c_byte - Writes an 8-bit value over I2C
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009131 * @hw: pointer to hardware structure
9132 * @byte_offset: byte offset to write
9133 * @dev_addr: device address
9134 * @data: value to write
9135 *
9136 * Performs byte write operation over I2C interface at
9137 * a specified device address.
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009138 **/
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009139s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
Jeff Kirsherb980ac12013-02-23 07:29:56 +00009140 u8 dev_addr, u8 data)
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009141{
9142 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00009143 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009144 s32 status;
9145 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9146
9147 if (!this_client)
9148 return E1000_ERR_I2C;
9149
Todd Fujinaka23d87822014-06-04 07:12:15 +00009150 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00009151 return E1000_ERR_SWFW_SYNC;
9152 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9153 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9154
9155	if (status)
9156		return E1000_ERR_I2C;
9157
Todd Fujinaka23d87822014-06-04 07:12:15 +00009158	return 0;
9160}
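/* A hedged usage sketch for the two I2C helpers above (the byte offset and
 * device address are illustrative, not taken from a datasheet):
 *
 *	u8 val;
 *
 *	if (!igb_read_i2c_byte(hw, 0x01, 0xF8, &val))
 *		igb_write_i2c_byte(hw, 0x01, 0xF8, val | 0x01);
 *
 * Both helpers take the E1000_SWFW_PHY0_SM semaphore before touching the
 * shared I2C bus and release it afterwards.
 */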
Laura Mihaela Vasilescu907b7832013-10-01 04:33:56 -07009161
9162int igb_reinit_queues(struct igb_adapter *adapter)
9163{
9164 struct net_device *netdev = adapter->netdev;
9165 struct pci_dev *pdev = adapter->pdev;
9166 int err = 0;
9167
9168 if (netif_running(netdev))
9169 igb_close(netdev);
9170
Carolyn Wyborny02ef6e12013-12-10 07:58:29 +00009171 igb_reset_interrupt_capability(adapter);
Laura Mihaela Vasilescu907b7832013-10-01 04:33:56 -07009172
9173 if (igb_init_interrupt_scheme(adapter, true)) {
9174 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9175 return -ENOMEM;
9176 }
9177
9178 if (netif_running(netdev))
9179 err = igb_open(netdev);
9180
9181 return err;
9182}
Gangfeng Huang0e71def2016-07-06 13:22:54 +08009183
9184static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9185{
9186 struct igb_nfc_filter *rule;
9187
9188 spin_lock(&adapter->nfc_lock);
9189
9190 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9191 igb_erase_filter(adapter, rule);
9192
9193 spin_unlock(&adapter->nfc_lock);
9194}
9195
9196static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9197{
9198 struct igb_nfc_filter *rule;
9199
9200 spin_lock(&adapter->nfc_lock);
9201
9202 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9203 igb_add_filter(adapter, rule);
9204
9205 spin_unlock(&adapter->nfc_lock);
9206}
Auke Kok9d5c8242008-01-24 02:22:38 -08009207/* igb_main.c */