/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

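/* Prefix every pr_<level>() message from this file with "igb: " (KBUILD_MODNAME) */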
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 4
#define MIN 1
#define BUILD 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
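/* with the values above, DRV_VERSION expands to "4.1.2-k" */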
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
				"Copyright (c) 2007-2013 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
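/* MODULE_DEVICE_TABLE() exports the PCI ID list above in the module alias
 * table so that udev/modprobe can autoload igb when a matching device is
 * discovered.
 */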

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
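/* SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() only fill in their
 * callbacks when CONFIG_PM_SLEEP / CONFIG_PM_RUNTIME are enabled, so this
 * table can be built in any PM configuration.
 */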
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
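/* debug = -1 lets netif_msg_init(), called at probe time, fall back to
 * DEFAULT_MSG_ENABLE above.
 */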

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						page_address(buffer_info->page) +
							buffer_info->page_offset,
						IGB_RX_BUFSZ, true);
				}
			}
		}
	}

exit:
	return;
}

/* igb_get_i2c_data - Reads the I2C SDA data bit
 * @data: pointer to hardware structure
 *
 * Returns the I2C data bit value
 */
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return ((i2cctl & E1000_I2C_DATA_IN) != 0);
}

/* igb_set_i2c_data - Sets the I2C data bit
 * @data: pointer to hardware structure
 * @state: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 */
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/* igb_set_i2c_clk - Sets the I2C SCL clock
 * @data: pointer to hardware structure
 * @state: state to set clock
 *
 * Sets the I2C clock line to state
 */
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/* igb_get_i2c_clk - Gets the I2C SCL clock state
 * @data: pointer to hardware structure
 *
 * Gets the I2C clock state
 */
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return ((i2cctl & E1000_I2C_CLK_IN) != 0);
}

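/* The callbacks below are handed to the kernel's i2c-algo-bit layer, which
 * bit-bangs SDA/SCL through the I2CPARAMS register; with udelay = 5 (used as
 * the half-period) the resulting bus speed is roughly 100 kHz.
 */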
static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
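/* The macro above interleaves queue indices for 82576 VF layouts:
 * even i -> i / 2, odd i -> 8 + i / 2 (i.e. 0, 8, 1, 9, 2, 10, ...), which
 * matches the "VF 0 is allocated queues 0 and 8" pairing described below.
 */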
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
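/* Example of the addressing used by igb_assign_vector() below: on 82580 and
 * newer parts the Rx cause for queue 5 lives at IVAR index 5 >> 1 = 2,
 * offset (5 & 1) << 4 = 16, and the matching Tx cause at offset 16 + 8 = 24.
 */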

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}
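/* The fallback order above is MSI-X (one vector per q_vector plus one for
 * link/other causes) -> single-vector MSI; dropping all the way to legacy
 * INTx only happens later, in igb_request_irq().
 */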

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count, size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct igb_q_vector) +
	       (sizeof(struct igb_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}
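/* Note: the q_vector and its ring(s) share the single kzalloc() above;
 * q_vector->ring is a trailing array member (see struct igb_q_vector in
 * igb.h), so everything is released together by kfree_rcu() in
 * igb_free_q_vector().
 */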

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
1299
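The vector loop above hands out the remaining Rx/Tx queues with DIV_ROUND_UP so the earlier vectors absorb the remainder and no queue is left unassigned. A minimal user-space sketch of that distribution (the queue and vector counts below are made up for illustration, not taken from the driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 3, rxr_remaining = 4, txr_remaining = 4;
	int v_idx;

	/* same arithmetic as the vector loop in igb_alloc_q_vectors() */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		printf("vector %d: %d Rx queue(s), %d Tx queue(s)\n",
		       v_idx, rqpv, tqpv);

		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
	}
	return 0;
}

With three vectors and four queues of each type, the first vector takes two Rx and two Tx queues and the remaining two vectors take one of each.
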
Alexander Duyck047e0032009-10-27 15:49:27 +00001300/**
1301 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1302 *
1303 * This function initializes the interrupts and allocates all of the queues.
1304 **/
Stefan Assmann53c7d062012-12-04 06:00:12 +00001305static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
Alexander Duyck047e0032009-10-27 15:49:27 +00001306{
1307 struct pci_dev *pdev = adapter->pdev;
1308 int err;
1309
Stefan Assmann53c7d062012-12-04 06:00:12 +00001310 igb_set_interrupt_capability(adapter, msix);
Alexander Duyck047e0032009-10-27 15:49:27 +00001311
1312 err = igb_alloc_q_vectors(adapter);
1313 if (err) {
1314 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1315 goto err_alloc_q_vectors;
1316 }
1317
Alexander Duyck5536d212012-09-25 00:31:17 +00001318 igb_cache_ring_register(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001319
1320 return 0;
Alexander Duyck5536d212012-09-25 00:31:17 +00001321
Alexander Duyck047e0032009-10-27 15:49:27 +00001322err_alloc_q_vectors:
1323 igb_reset_interrupt_capability(adapter);
1324 return err;
1325}
1326
1327/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001328 * igb_request_irq - initialize interrupts
1329 *
1330 * Attempts to configure interrupts using the best available
1331 * capabilities of the hardware and kernel.
1332 **/
1333static int igb_request_irq(struct igb_adapter *adapter)
1334{
1335 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001336 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001337 int err = 0;
1338
1339 if (adapter->msix_entries) {
1340 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001341 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001342 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001343 /* fall back to MSI */
Alexander Duyck5536d212012-09-25 00:31:17 +00001344 igb_free_all_tx_resources(adapter);
1345 igb_free_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001346
Alexander Duyck047e0032009-10-27 15:49:27 +00001347 igb_clear_interrupt_scheme(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001348 err = igb_init_interrupt_scheme(adapter, false);
1349 if (err)
Alexander Duyck047e0032009-10-27 15:49:27 +00001350 goto request_done;
Stefan Assmann53c7d062012-12-04 06:00:12 +00001351
Alexander Duyck047e0032009-10-27 15:49:27 +00001352 igb_setup_all_tx_resources(adapter);
1353 igb_setup_all_rx_resources(adapter);
Stefan Assmann53c7d062012-12-04 06:00:12 +00001354 igb_configure(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001355 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001356
Alexander Duyckc74d5882011-08-26 07:46:45 +00001357 igb_assign_vector(adapter->q_vector[0], 0);
1358
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001359 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001360 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001361 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001362 if (!err)
1363 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001364
Auke Kok9d5c8242008-01-24 02:22:38 -08001365 /* fall back to legacy interrupts */
1366 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001367 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001368 }
1369
Alexander Duyckc74d5882011-08-26 07:46:45 +00001370 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001371 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001372
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001373 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001374 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001375 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001376
1377request_done:
1378 return err;
1379}
1380
1381static void igb_free_irq(struct igb_adapter *adapter)
1382{
Auke Kok9d5c8242008-01-24 02:22:38 -08001383 if (adapter->msix_entries) {
1384 int vector = 0, i;
1385
Alexander Duyck047e0032009-10-27 15:49:27 +00001386 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001387
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001388 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001389 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001390 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001391 } else {
1392 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001393 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001394}
1395
1396/**
1397 * igb_irq_disable - Mask off interrupt generation on the NIC
1398 * @adapter: board private structure
1399 **/
1400static void igb_irq_disable(struct igb_adapter *adapter)
1401{
1402 struct e1000_hw *hw = &adapter->hw;
1403
Alexander Duyck25568a52009-10-27 23:49:59 +00001404 /*
 1405 * We need to be careful when disabling interrupts. The VFs are also
 1406 * mapped into these registers, so clearing the bits can cause
 1407 * issues for the VF drivers; we only clear the bits we set ourselves.
1408 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001409 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001410 u32 regval = rd32(E1000_EIAM);
1411 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1412 wr32(E1000_EIMC, adapter->eims_enable_mask);
1413 regval = rd32(E1000_EIAC);
1414 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001415 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001416
1417 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001418 wr32(E1000_IMC, ~0);
1419 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001420 if (adapter->msix_entries) {
1421 int i;
1422 for (i = 0; i < adapter->num_q_vectors; i++)
1423 synchronize_irq(adapter->msix_entries[i].vector);
1424 } else {
1425 synchronize_irq(adapter->pdev->irq);
1426 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001427}
1428
1429/**
1430 * igb_irq_enable - Enable default interrupt generation settings
1431 * @adapter: board private structure
1432 **/
1433static void igb_irq_enable(struct igb_adapter *adapter)
1434{
1435 struct e1000_hw *hw = &adapter->hw;
1436
1437 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001438 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001439 u32 regval = rd32(E1000_EIAC);
1440 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1441 regval = rd32(E1000_EIAM);
1442 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001443 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001444 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001445 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001446 ims |= E1000_IMS_VMMB;
1447 }
1448 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001449 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001450 wr32(E1000_IMS, IMS_ENABLE_MASK |
1451 E1000_IMS_DRSTA);
1452 wr32(E1000_IAM, IMS_ENABLE_MASK |
1453 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001454 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001455}
1456
1457static void igb_update_mng_vlan(struct igb_adapter *adapter)
1458{
Alexander Duyck51466232009-10-27 23:47:35 +00001459 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001460 u16 vid = adapter->hw.mng_cookie.vlan_id;
1461 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001462
Alexander Duyck51466232009-10-27 23:47:35 +00001463 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1464 /* add VID to filter table */
1465 igb_vfta_set(hw, vid, true);
1466 adapter->mng_vlan_id = vid;
1467 } else {
1468 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1469 }
1470
1471 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1472 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001473 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001474 /* remove VID from filter table */
1475 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001476 }
1477}
1478
1479/**
1480 * igb_release_hw_control - release control of the h/w to f/w
1481 * @adapter: address of board private structure
1482 *
1483 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1484 * For ASF and Pass Through versions of f/w this means that the
1485 * driver is no longer loaded.
1486 *
1487 **/
1488static void igb_release_hw_control(struct igb_adapter *adapter)
1489{
1490 struct e1000_hw *hw = &adapter->hw;
1491 u32 ctrl_ext;
1492
1493 /* Let firmware take over control of h/w */
1494 ctrl_ext = rd32(E1000_CTRL_EXT);
1495 wr32(E1000_CTRL_EXT,
1496 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1497}
1498
Auke Kok9d5c8242008-01-24 02:22:38 -08001499/**
1500 * igb_get_hw_control - get control of the h/w from f/w
1501 * @adapter: address of board private structure
1502 *
1503 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1504 * For ASF and Pass Through versions of f/w this means that
1505 * the driver is loaded.
1506 *
1507 **/
1508static void igb_get_hw_control(struct igb_adapter *adapter)
1509{
1510 struct e1000_hw *hw = &adapter->hw;
1511 u32 ctrl_ext;
1512
1513 /* Let firmware know the driver has taken over */
1514 ctrl_ext = rd32(E1000_CTRL_EXT);
1515 wr32(E1000_CTRL_EXT,
1516 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1517}
1518
Auke Kok9d5c8242008-01-24 02:22:38 -08001519/**
1520 * igb_configure - configure the hardware for RX and TX
1521 * @adapter: private board structure
1522 **/
1523static void igb_configure(struct igb_adapter *adapter)
1524{
1525 struct net_device *netdev = adapter->netdev;
1526 int i;
1527
1528 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001529 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001530
1531 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001532
Alexander Duyck85b430b2009-10-27 15:50:29 +00001533 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001534 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001535 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001536
1537 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001538 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001539
1540 igb_rx_fifo_flush_82575(&adapter->hw);
1541
Alexander Duyckc493ea42009-03-20 00:16:50 +00001542 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001543 * at least 1 descriptor unused to make sure
1544 * next_to_use != next_to_clean */
1545 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001546 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001547 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001548 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001549}
1550
Nick Nunley88a268c2010-02-17 01:01:59 +00001551/**
1552 * igb_power_up_link - Power up the phy/serdes link
1553 * @adapter: address of board private structure
1554 **/
1555void igb_power_up_link(struct igb_adapter *adapter)
1556{
Akeem G. Abodunrin76886592012-07-17 04:51:18 +00001557 igb_reset_phy(&adapter->hw);
1558
Nick Nunley88a268c2010-02-17 01:01:59 +00001559 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1560 igb_power_up_phy_copper(&adapter->hw);
1561 else
1562 igb_power_up_serdes_link_82575(&adapter->hw);
1563}
1564
1565/**
1566 * igb_power_down_link - Power down the phy/serdes link
1567 * @adapter: address of board private structure
1568 */
1569static void igb_power_down_link(struct igb_adapter *adapter)
1570{
1571 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1572 igb_power_down_phy_copper_82575(&adapter->hw);
1573 else
1574 igb_shutdown_serdes_link_82575(&adapter->hw);
1575}
Auke Kok9d5c8242008-01-24 02:22:38 -08001576
1577/**
1578 * igb_up - Open the interface and prepare it to handle traffic
1579 * @adapter: board private structure
1580 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001581int igb_up(struct igb_adapter *adapter)
1582{
1583 struct e1000_hw *hw = &adapter->hw;
1584 int i;
1585
1586 /* hardware has been reset, we need to reload some things */
1587 igb_configure(adapter);
1588
1589 clear_bit(__IGB_DOWN, &adapter->state);
1590
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001591 for (i = 0; i < adapter->num_q_vectors; i++)
1592 napi_enable(&(adapter->q_vector[i]->napi));
1593
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001594 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001595 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001596 else
1597 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001598
1599 /* Clear any pending interrupts. */
1600 rd32(E1000_ICR);
1601 igb_irq_enable(adapter);
1602
Alexander Duyckd4960302009-10-27 15:53:45 +00001603 /* notify VFs that reset has been completed */
1604 if (adapter->vfs_allocated_count) {
1605 u32 reg_data = rd32(E1000_CTRL_EXT);
1606 reg_data |= E1000_CTRL_EXT_PFRSTD;
1607 wr32(E1000_CTRL_EXT, reg_data);
1608 }
1609
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001610 netif_tx_start_all_queues(adapter->netdev);
1611
Alexander Duyck25568a52009-10-27 23:49:59 +00001612 /* start the watchdog. */
1613 hw->mac.get_link_status = 1;
1614 schedule_work(&adapter->watchdog_task);
1615
Auke Kok9d5c8242008-01-24 02:22:38 -08001616 return 0;
1617}
1618
1619void igb_down(struct igb_adapter *adapter)
1620{
Auke Kok9d5c8242008-01-24 02:22:38 -08001621 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001622 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001623 u32 tctl, rctl;
1624 int i;
1625
1626 /* signal that we're down so the interrupt handler does not
1627 * reschedule our watchdog timer */
1628 set_bit(__IGB_DOWN, &adapter->state);
1629
1630 /* disable receives in the hardware */
1631 rctl = rd32(E1000_RCTL);
1632 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1633 /* flush and sleep below */
1634
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001635 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001636
1637 /* disable transmits in the hardware */
1638 tctl = rd32(E1000_TCTL);
1639 tctl &= ~E1000_TCTL_EN;
1640 wr32(E1000_TCTL, tctl);
1641 /* flush both disables and wait for them to finish */
1642 wrfl();
1643 msleep(10);
1644
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001645 for (i = 0; i < adapter->num_q_vectors; i++)
1646 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001647
Auke Kok9d5c8242008-01-24 02:22:38 -08001648 igb_irq_disable(adapter);
1649
1650 del_timer_sync(&adapter->watchdog_timer);
1651 del_timer_sync(&adapter->phy_info_timer);
1652
Auke Kok9d5c8242008-01-24 02:22:38 -08001653 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001654
 1655 /* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001656 spin_lock(&adapter->stats64_lock);
1657 igb_update_stats(adapter, &adapter->stats64);
1658 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001659
Auke Kok9d5c8242008-01-24 02:22:38 -08001660 adapter->link_speed = 0;
1661 adapter->link_duplex = 0;
1662
Jeff Kirsher30236822008-06-24 17:01:15 -07001663 if (!pci_channel_offline(adapter->pdev))
1664 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001665 igb_clean_all_tx_rings(adapter);
1666 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001667#ifdef CONFIG_IGB_DCA
1668
1669 /* since we reset the hardware DCA settings were cleared */
1670 igb_setup_dca(adapter);
1671#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001672}
1673
1674void igb_reinit_locked(struct igb_adapter *adapter)
1675{
1676 WARN_ON(in_interrupt());
1677 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1678 msleep(1);
1679 igb_down(adapter);
1680 igb_up(adapter);
1681 clear_bit(__IGB_RESETTING, &adapter->state);
1682}
1683
1684void igb_reset(struct igb_adapter *adapter)
1685{
Alexander Duyck090b1792009-10-27 23:51:55 +00001686 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001687 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001688 struct e1000_mac_info *mac = &hw->mac;
1689 struct e1000_fc_info *fc = &hw->fc;
Matthew Vickd48507f2012-11-08 04:03:58 +00001690 u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
Auke Kok9d5c8242008-01-24 02:22:38 -08001691
1692 /* Repartition Pba for greater than 9k mtu
1693 * To take effect CTRL.RST is required.
1694 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001695 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001696 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001697 case e1000_82580:
1698 pba = rd32(E1000_RXPBS);
1699 pba = igb_rxpbs_adjust_82580(pba);
1700 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001701 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001702 pba = rd32(E1000_RXPBS);
1703 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001704 break;
1705 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001706 case e1000_i210:
1707 case e1000_i211:
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001708 default:
1709 pba = E1000_PBA_34K;
1710 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001711 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001712
Alexander Duyck2d064c02008-07-08 15:10:12 -07001713 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1714 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001715 /* adjust PBA for jumbo frames */
1716 wr32(E1000_PBA, pba);
1717
1718 /* To maintain wire speed transmits, the Tx FIFO should be
1719 * large enough to accommodate two full transmit packets,
1720 * rounded up to the next 1KB and expressed in KB. Likewise,
1721 * the Rx FIFO should be large enough to accommodate at least
1722 * one full receive packet and is similarly rounded up and
1723 * expressed in KB. */
1724 pba = rd32(E1000_PBA);
1725 /* upper 16 bits has Tx packet buffer allocation size in KB */
1726 tx_space = pba >> 16;
1727 /* lower 16 bits has Rx packet buffer allocation size in KB */
1728 pba &= 0xffff;
 1729 /* the Tx FIFO also stores 16 bytes of information about the Tx packet,
 1730 * but doesn't include the Ethernet FCS because hardware appends it */
1731 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001732 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001733 ETH_FCS_LEN) * 2;
1734 min_tx_space = ALIGN(min_tx_space, 1024);
1735 min_tx_space >>= 10;
1736 /* software strips receive CRC, so leave room for it */
1737 min_rx_space = adapter->max_frame_size;
1738 min_rx_space = ALIGN(min_rx_space, 1024);
1739 min_rx_space >>= 10;
1740
1741 /* If current Tx allocation is less than the min Tx FIFO size,
1742 * and the min Tx FIFO size is less than the current Rx FIFO
1743 * allocation, take space away from current Rx allocation */
1744 if (tx_space < min_tx_space &&
1745 ((min_tx_space - tx_space) < pba)) {
1746 pba = pba - (min_tx_space - tx_space);
1747
1748 /* if short on rx space, rx wins and must trump tx
1749 * adjustment */
1750 if (pba < min_rx_space)
1751 pba = min_rx_space;
1752 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001753 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001754 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001755
1756 /* flow control settings */
1757 /* The high water mark must be low enough to fit one full frame
1758 * (or the size used for early receive) above it in the Rx FIFO.
1759 * Set it to the lower of:
1760 * - 90% of the Rx FIFO size, or
1761 * - the full Rx FIFO size minus one full frame */
1762 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001763 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001764
Matthew Vickd48507f2012-11-08 04:03:58 +00001765 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
Alexander Duyckd405ea32009-12-23 13:21:27 +00001766 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001767 fc->pause_time = 0xFFFF;
1768 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001769 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001770
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001771 /* disable receive for all VFs and wait one second */
1772 if (adapter->vfs_allocated_count) {
1773 int i;
1774 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001775 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001776
1777 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001778 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001779
1780 /* disable transmits and receives */
1781 wr32(E1000_VFRE, 0);
1782 wr32(E1000_VFTE, 0);
1783 }
1784
Auke Kok9d5c8242008-01-24 02:22:38 -08001785 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001786 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001787 wr32(E1000_WUC, 0);
1788
Alexander Duyck330a6d62009-10-27 23:51:35 +00001789 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001790 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001791
Matthew Vicka27416b2012-04-18 02:57:44 +00001792 /*
1793 * Flow control settings reset on hardware reset, so guarantee flow
1794 * control is off when forcing speed.
1795 */
1796 if (!hw->mac.autoneg)
1797 igb_force_mac_fc(hw);
1798
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001799 igb_init_dmac(adapter, pba);
Carolyn Wybornye4288932012-12-07 03:01:42 +00001800#ifdef CONFIG_IGB_HWMON
1801 /* Re-initialize the thermal sensor on i350 devices. */
1802 if (!test_bit(__IGB_DOWN, &adapter->state)) {
1803 if (mac->type == e1000_i350 && hw->bus.func == 0) {
1804 /* If present, re-initialize the external thermal sensor
1805 * interface.
1806 */
1807 if (adapter->ets)
1808 mac->ops.init_thermal_sensor_thresh(hw);
1809 }
1810 }
1811#endif
Nick Nunley88a268c2010-02-17 01:01:59 +00001812 if (!netif_running(adapter->netdev))
1813 igb_power_down_link(adapter);
1814
Auke Kok9d5c8242008-01-24 02:22:38 -08001815 igb_update_mng_vlan(adapter);
1816
1817 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1818 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1819
Matthew Vick1f6e8172012-08-18 07:26:33 +00001820 /* Re-enable PTP, where applicable. */
1821 igb_ptp_reset(adapter);
Matthew Vick1f6e8172012-08-18 07:26:33 +00001822
Alexander Duyck330a6d62009-10-27 23:51:35 +00001823 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001824}
1825
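The PBA repartition and flow control watermark math in igb_reset() is easier to follow with concrete numbers. Below is a standalone sketch of the same formulas, using a hypothetical 9018-byte jumbo frame and a 34 KB packet buffer (values chosen purely for illustration):

#include <stdio.h>

/* power-of-two alignment, same semantics as the kernel ALIGN() macro */
#define ALIGN_POW2(x, a)	(((x) + (a) - 1) & ~((a) - 1))

#define ETH_FCS_LEN		4	/* frame check sequence */
#define ADV_TX_DESC_LEN		16	/* sizeof(union e1000_adv_tx_desc) */

int main(void)
{
	unsigned int max_frame_size = 9018;	/* hypothetical jumbo frame */
	unsigned int pba = 34;			/* Rx packet buffer in KB */
	unsigned int min_tx_space, min_rx_space, hwm, high_water, low_water;

	/* Tx FIFO must hold two full frames plus descriptor info, in KB */
	min_tx_space = (max_frame_size + ADV_TX_DESC_LEN - ETH_FCS_LEN) * 2;
	min_tx_space = ALIGN_POW2(min_tx_space, 1024) >> 10;

	/* Rx FIFO must hold at least one full frame, in KB */
	min_rx_space = ALIGN_POW2(max_frame_size, 1024) >> 10;

	/* high water mark: lower of 90% of the Rx FIFO or the FIFO minus
	 * two full frames, then rounded down to 16-byte granularity */
	hwm = (pba << 10) * 9 / 10;
	if (hwm > (pba << 10) - 2 * max_frame_size)
		hwm = (pba << 10) - 2 * max_frame_size;

	high_water = hwm & 0xFFFFFFF0;
	low_water = high_water - 16;

	printf("min_tx_space=%uKB min_rx_space=%uKB high=%u low=%u\n",
	       min_tx_space, min_rx_space, high_water, low_water);
	return 0;
}

Running it prints the Tx/Rx FIFO minimums in KB and the resulting high/low watermarks in bytes.
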
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001826static netdev_features_t igb_fix_features(struct net_device *netdev,
1827 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001828{
1829 /*
 1830 * Since there is no support for separate Rx/Tx VLAN accel
 1831 * enable/disable, make sure the Tx flag is always in the same state as Rx.
1832 */
1833 if (features & NETIF_F_HW_VLAN_RX)
1834 features |= NETIF_F_HW_VLAN_TX;
1835 else
1836 features &= ~NETIF_F_HW_VLAN_TX;
1837
1838 return features;
1839}
1840
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001841static int igb_set_features(struct net_device *netdev,
1842 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001843{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001844 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001845 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001846
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001847 if (changed & NETIF_F_HW_VLAN_RX)
1848 igb_vlan_mode(netdev, features);
1849
Ben Greear89eaefb2012-03-06 09:41:58 +00001850 if (!(changed & NETIF_F_RXALL))
1851 return 0;
1852
1853 netdev->features = features;
1854
1855 if (netif_running(netdev))
1856 igb_reinit_locked(adapter);
1857 else
1858 igb_reset(adapter);
1859
Michał Mirosławac52caa2011-06-08 08:38:01 +00001860 return 0;
1861}
1862
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001863static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001864 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001865 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001866 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001867 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001868 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001869 .ndo_set_mac_address = igb_set_mac,
1870 .ndo_change_mtu = igb_change_mtu,
1871 .ndo_do_ioctl = igb_ioctl,
1872 .ndo_tx_timeout = igb_tx_timeout,
1873 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001874 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1875 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001876 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1877 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1878 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1879 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001880#ifdef CONFIG_NET_POLL_CONTROLLER
1881 .ndo_poll_controller = igb_netpoll,
1882#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001883 .ndo_fix_features = igb_fix_features,
1884 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001885};
1886
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001887/**
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001888 * igb_set_fw_version - Configure version string for ethtool
1889 * @adapter: adapter struct
1890 *
1891 **/
1892void igb_set_fw_version(struct igb_adapter *adapter)
1893{
1894 struct e1000_hw *hw = &adapter->hw;
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001895 struct e1000_fw_version fw;
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001896
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001897 igb_get_fw_version(hw, &fw);
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001898
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001899 switch (hw->mac.type) {
1900 case e1000_i211:
1901 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1902 "%2d.%2d-%d",
1903 fw.invm_major, fw.invm_minor, fw.invm_img_type);
1904 break;
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001905
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001906 default:
1907 /* if option is rom valid, display its version too */
1908 if (fw.or_valid) {
1909 snprintf(adapter->fw_version,
1910 sizeof(adapter->fw_version),
1911 "%d.%d, 0x%08x, %d.%d.%d",
1912 fw.eep_major, fw.eep_minor, fw.etrack_id,
1913 fw.or_major, fw.or_build, fw.or_patch);
1914 /* no option rom */
1915 } else {
1916 snprintf(adapter->fw_version,
1917 sizeof(adapter->fw_version),
1918 "%d.%d, 0x%08x",
1919 fw.eep_major, fw.eep_minor, fw.etrack_id);
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001920 }
Carolyn Wyborny0b1a6f22012-10-18 07:16:19 +00001921 break;
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001922 }
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001923 return;
1924}
1925
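For reference, here is a small user-space sketch of the same snprintf formatting used in the option-ROM branch of igb_set_fw_version() above; the firmware version numbers are invented for illustration and stand in for the fields of struct e1000_fw_version:

#include <stdio.h>

int main(void)
{
	char fw_version[32];
	/* hypothetical values standing in for struct e1000_fw_version */
	int eep_major = 1, eep_minor = 63;
	unsigned int etrack_id = 0x800004ff;
	int or_valid = 1, or_major = 1, or_build = 3, or_patch = 0;

	if (or_valid)
		snprintf(fw_version, sizeof(fw_version),
			 "%d.%d, 0x%08x, %d.%d.%d",
			 eep_major, eep_minor, etrack_id,
			 or_major, or_build, or_patch);
	else
		snprintf(fw_version, sizeof(fw_version),
			 "%d.%d, 0x%08x",
			 eep_major, eep_minor, etrack_id);

	printf("%s\n", fw_version);
	return 0;
}
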
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00001926/* igb_init_i2c - Init I2C interface
1927 * @adapter: pointer to adapter structure
1928 *
1929 */
1930static s32 igb_init_i2c(struct igb_adapter *adapter)
1931{
1932 s32 status = E1000_SUCCESS;
1933
1934 /* I2C interface supported on i350 devices */
1935 if (adapter->hw.mac.type != e1000_i350)
1936 return E1000_SUCCESS;
1937
1938 /* Initialize the i2c bus which is controlled by the registers.
 1939 * This bus will use the i2c_algo_bit structure that implements
1940 * the protocol through toggling of the 4 bits in the register.
1941 */
1942 adapter->i2c_adap.owner = THIS_MODULE;
1943 adapter->i2c_algo = igb_i2c_algo;
1944 adapter->i2c_algo.data = adapter;
1945 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
1946 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
1947 strlcpy(adapter->i2c_adap.name, "igb BB",
1948 sizeof(adapter->i2c_adap.name));
1949 status = i2c_bit_add_bus(&adapter->i2c_adap);
1950 return status;
1951}
1952
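igb_init_i2c() above registers a bit-banged bus through the i2c-algo-bit layer. A rough kernel-side sketch of how such a bus is typically wired up follows; the callbacks and names are hypothetical placeholders, not the igb implementation (which reuses the igb_i2c_algo template and toggles bits in a device register):

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

/* Hypothetical GPIO-style callbacks; a real driver would toggle the
 * SDA/SCL bits of a device register in these hooks. */
static void ex_setsda(void *data, int state) { }
static void ex_setscl(void *data, int state) { }
static int ex_getsda(void *data) { return 1; }
static int ex_getscl(void *data) { return 1; }

static struct i2c_algo_bit_data ex_algo = {
	.setsda	 = ex_setsda,
	.setscl	 = ex_setscl,
	.getsda	 = ex_getsda,
	.getscl	 = ex_getscl,
	.udelay	 = 5,	/* half-clock delay in usecs */
	.timeout = 20,	/* bus busy timeout in jiffies */
};

static struct i2c_adapter ex_adap = {
	.owner	   = THIS_MODULE,
	.algo_data = &ex_algo,
	.name	   = "example BB",
};

/* i2c_bit_add_bus() attaches the bit-banging algorithm and registers
 * the adapter with the I2C core, as igb_init_i2c() does. */
static int ex_register(struct device *parent)
{
	ex_adap.dev.parent = parent;
	return i2c_bit_add_bus(&ex_adap);
}
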
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001953/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001954 * igb_probe - Device Initialization Routine
1955 * @pdev: PCI device information struct
1956 * @ent: entry in igb_pci_tbl
1957 *
1958 * Returns 0 on success, negative on failure
1959 *
1960 * igb_probe initializes an adapter identified by a pci_dev structure.
1961 * The OS initialization, configuring of the adapter private structure,
1962 * and a hardware reset occur.
1963 **/
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00001964static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Auke Kok9d5c8242008-01-24 02:22:38 -08001965{
1966 struct net_device *netdev;
1967 struct igb_adapter *adapter;
1968 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001969 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001970 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001971 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001972 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1973 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001974 int err, pci_using_dac;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001975 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001976
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001977 /* Catch broken hardware that put the wrong VF device ID in
1978 * the PCIe SR-IOV capability.
1979 */
1980 if (pdev->is_virtfn) {
1981 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001982 pci_name(pdev), pdev->vendor, pdev->device);
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001983 return -EINVAL;
1984 }
1985
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001986 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001987 if (err)
1988 return err;
1989
1990 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001991 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001992 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001993 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001994 if (!err)
1995 pci_using_dac = 1;
1996 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001997 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001998 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001999 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08002000 if (err) {
2001 dev_err(&pdev->dev, "No usable DMA "
2002 "configuration, aborting\n");
2003 goto err_dma;
2004 }
2005 }
2006 }
2007
Alexander Duyckaed5dec2009-02-06 23:16:04 +00002008 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
2009 IORESOURCE_MEM),
2010 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08002011 if (err)
2012 goto err_pci_reg;
2013
Frans Pop19d5afd2009-10-02 10:04:12 -07002014 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002015
Auke Kok9d5c8242008-01-24 02:22:38 -08002016 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07002017 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002018
2019 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08002020 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00002021 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08002022 if (!netdev)
2023 goto err_alloc_etherdev;
2024
2025 SET_NETDEV_DEV(netdev, &pdev->dev);
2026
2027 pci_set_drvdata(pdev, netdev);
2028 adapter = netdev_priv(netdev);
2029 adapter->netdev = netdev;
2030 adapter->pdev = pdev;
2031 hw = &adapter->hw;
2032 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00002033 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9d5c8242008-01-24 02:22:38 -08002034
2035 mmio_start = pci_resource_start(pdev, 0);
2036 mmio_len = pci_resource_len(pdev, 0);
2037
2038 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00002039 hw->hw_addr = ioremap(mmio_start, mmio_len);
2040 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08002041 goto err_ioremap;
2042
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08002043 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08002044 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002045 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08002046
2047 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2048
2049 netdev->mem_start = mmio_start;
2050 netdev->mem_end = mmio_start + mmio_len;
2051
Auke Kok9d5c8242008-01-24 02:22:38 -08002052 /* PCI config space info */
2053 hw->vendor_id = pdev->vendor;
2054 hw->device_id = pdev->device;
2055 hw->revision_id = pdev->revision;
2056 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2057 hw->subsystem_device_id = pdev->subsystem_device;
2058
Auke Kok9d5c8242008-01-24 02:22:38 -08002059 /* Copy the default MAC, PHY and NVM function pointers */
2060 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
2061 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
2062 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
2063 /* Initialize skew-specific constants */
2064 err = ei->get_invariants(hw);
2065 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00002066 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08002067
Alexander Duyck450c87c2009-02-06 23:22:11 +00002068 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08002069 err = igb_sw_init(adapter);
2070 if (err)
2071 goto err_sw_init;
2072
2073 igb_get_bus_info_pcie(hw);
2074
2075 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002076
2077 /* Copper options */
2078 if (hw->phy.media_type == e1000_media_type_copper) {
2079 hw->phy.mdix = AUTO_ALL_MODES;
2080 hw->phy.disable_polarity_correction = false;
2081 hw->phy.ms_type = e1000_ms_hw_default;
2082 }
2083
2084 if (igb_check_reset_block(hw))
2085 dev_info(&pdev->dev,
2086 "PHY reset is blocked due to SOL/IDER session.\n");
2087
Alexander Duyck077887c2011-08-26 07:46:29 +00002088 /*
 2089 * features is initialized to 0 in allocation; it might have bits
 2090 * set by igb_sw_init, so we should use an OR instead of an
 2091 * assignment.
2092 */
2093 netdev->features |= NETIF_F_SG |
2094 NETIF_F_IP_CSUM |
2095 NETIF_F_IPV6_CSUM |
2096 NETIF_F_TSO |
2097 NETIF_F_TSO6 |
2098 NETIF_F_RXHASH |
2099 NETIF_F_RXCSUM |
2100 NETIF_F_HW_VLAN_RX |
2101 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002102
Alexander Duyck077887c2011-08-26 07:46:29 +00002103 /* copy netdev features into list of user selectable features */
2104 netdev->hw_features |= netdev->features;
Ben Greear89eaefb2012-03-06 09:41:58 +00002105 netdev->hw_features |= NETIF_F_RXALL;
Auke Kok9d5c8242008-01-24 02:22:38 -08002106
Alexander Duyck077887c2011-08-26 07:46:29 +00002107 /* set this bit last since it cannot be part of hw_features */
2108 netdev->features |= NETIF_F_HW_VLAN_FILTER;
2109
2110 netdev->vlan_features |= NETIF_F_TSO |
2111 NETIF_F_TSO6 |
2112 NETIF_F_IP_CSUM |
2113 NETIF_F_IPV6_CSUM |
2114 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002115
Ben Greear6b8f0922012-03-06 09:41:53 +00002116 netdev->priv_flags |= IFF_SUPP_NOFCS;
2117
Yi Zou7b872a52010-09-22 17:57:58 +00002118 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002119 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00002120 netdev->vlan_features |= NETIF_F_HIGHDMA;
2121 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002122
Michał Mirosławac52caa2011-06-08 08:38:01 +00002123 if (hw->mac.type >= e1000_82576) {
2124 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002125 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002126 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002127
Jiri Pirko01789342011-08-16 06:29:00 +00002128 netdev->priv_flags |= IFF_UNICAST_FLT;
2129
Alexander Duyck330a6d62009-10-27 23:51:35 +00002130 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002131
2132 /* before reading the NVM, reset the controller to put the device in a
2133 * known good starting state */
2134 hw->mac.ops.reset_hw(hw);
2135
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002136 /*
 2137 * make sure the NVM is good; i211 parts have special NVM that
2138 * doesn't contain a checksum
2139 */
2140 if (hw->mac.type != e1000_i211) {
2141 if (hw->nvm.ops.validate(hw) < 0) {
2142 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2143 err = -EIO;
2144 goto err_eeprom;
2145 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002146 }
2147
2148 /* copy the MAC address out of the NVM */
2149 if (hw->mac.ops.read_mac_addr(hw))
2150 dev_err(&pdev->dev, "NVM Read Error\n");
2151
2152 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08002153
Jiri Pirkoaaeb6cd2013-01-08 01:38:26 +00002154 if (!is_valid_ether_addr(netdev->dev_addr)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002155 dev_err(&pdev->dev, "Invalid MAC Address\n");
2156 err = -EIO;
2157 goto err_eeprom;
2158 }
2159
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002160 /* get firmware version for ethtool -i */
2161 igb_set_fw_version(adapter);
2162
Joe Perchesc061b182010-08-23 18:20:03 +00002163 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002164 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002165 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002166 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002167
2168 INIT_WORK(&adapter->reset_task, igb_reset_task);
2169 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2170
Alexander Duyck450c87c2009-02-06 23:22:11 +00002171 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002172 adapter->fc_autoneg = true;
2173 hw->mac.autoneg = true;
2174 hw->phy.autoneg_advertised = 0x2f;
2175
Alexander Duyck0cce1192009-07-23 18:10:24 +00002176 hw->fc.requested_mode = e1000_fc_default;
2177 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002178
Auke Kok9d5c8242008-01-24 02:22:38 -08002179 igb_validate_mdi_setting(hw);
2180
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002181 /* By default, support wake on port A */
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002182 if (hw->bus.func == 0)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002183 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2184
2185 /* Check the NVM for wake support on non-port A ports */
2186 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002187 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2188 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2189 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002190 else if (hw->bus.func == 1)
2191 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002192
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002193 if (eeprom_data & IGB_EEPROM_APME)
2194 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002195
2196 /* now that we have the eeprom settings, apply the special cases where
2197 * the eeprom may be wrong or the board simply won't support wake on
2198 * lan on a particular port */
2199 switch (pdev->device) {
2200 case E1000_DEV_ID_82575GB_QUAD_COPPER:
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002201 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002202 break;
2203 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002204 case E1000_DEV_ID_82576_FIBER:
2205 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002206 /* Wake events only supported on port A for dual fiber
2207 * regardless of eeprom setting */
2208 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002209 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002210 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002211 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002212 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002213 /* if quad port adapter, disable WoL on all but port A */
2214 if (global_quad_port_a != 0)
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002215 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002216 else
2217 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2218 /* Reset for multiple quad port adapters */
2219 if (++global_quad_port_a == 4)
2220 global_quad_port_a = 0;
2221 break;
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002222 default:
2223 /* If the device can't wake, don't set software support */
2224 if (!device_can_wakeup(&adapter->pdev->dev))
2225 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
Auke Kok9d5c8242008-01-24 02:22:38 -08002226 }
2227
2228 /* initialize the wol settings based on the eeprom settings */
Matthew Vick63d4a8f2012-11-09 05:49:54 +00002229 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2230 adapter->wol |= E1000_WUFC_MAG;
2231
2232 /* Some vendors want WoL disabled by default, but still supported */
2233 if ((hw->mac.type == e1000_i350) &&
2234 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2235 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2236 adapter->wol = 0;
2237 }
2238
2239 device_set_wakeup_enable(&adapter->pdev->dev,
2240 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
Auke Kok9d5c8242008-01-24 02:22:38 -08002241
2242 /* reset the hardware with the new settings */
2243 igb_reset(adapter);
2244
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002245 /* Init the I2C interface */
2246 err = igb_init_i2c(adapter);
2247 if (err) {
2248 dev_err(&pdev->dev, "failed to init i2c interface\n");
2249 goto err_eeprom;
2250 }
2251
Auke Kok9d5c8242008-01-24 02:22:38 -08002252 /* let the f/w know that the h/w is now under the control of the
2253 * driver. */
2254 igb_get_hw_control(adapter);
2255
Auke Kok9d5c8242008-01-24 02:22:38 -08002256 strcpy(netdev->name, "eth%d");
2257 err = register_netdev(netdev);
2258 if (err)
2259 goto err_register;
2260
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002261 /* carrier off reporting is important to ethtool even BEFORE open */
2262 netif_carrier_off(netdev);
2263
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002264#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002265 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002266 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002267 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002268 igb_setup_dca(adapter);
2269 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002270
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002271#endif
Carolyn Wybornye4288932012-12-07 03:01:42 +00002272#ifdef CONFIG_IGB_HWMON
2273 /* Initialize the thermal sensor on i350 devices. */
2274 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2275 u16 ets_word;
Matthew Vick3c89f6d2012-08-10 05:40:43 +00002276
Carolyn Wybornye4288932012-12-07 03:01:42 +00002277 /*
2278 * Read the NVM to determine if this i350 device supports an
2279 * external thermal sensor.
2280 */
2281 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
2282 if (ets_word != 0x0000 && ets_word != 0xFFFF)
2283 adapter->ets = true;
2284 else
2285 adapter->ets = false;
2286 if (igb_sysfs_init(adapter))
2287 dev_err(&pdev->dev,
2288 "failed to allocate sysfs resources\n");
2289 } else {
2290 adapter->ets = false;
2291 }
2292#endif
Anders Berggren673b8b72011-02-04 07:32:32 +00002293 /* do hw tstamp init after resetting */
Richard Cochran7ebae812012-03-16 10:55:37 +00002294 igb_ptp_init(adapter);
Anders Berggren673b8b72011-02-04 07:32:32 +00002295
Auke Kok9d5c8242008-01-24 02:22:38 -08002296 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2297 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002298 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002299 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002300 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002301 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002302 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002303 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2304 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2305 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2306 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002307 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002308
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002309 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2310 if (ret_val)
2311 strcpy(part_str, "Unknown");
2312 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002313 dev_info(&pdev->dev,
2314 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2315 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002316 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002317 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002318 switch (hw->mac.type) {
2319 case e1000_i350:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002320 case e1000_i210:
2321 case e1000_i211:
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002322 igb_set_eee_i350(hw);
2323 break;
2324 default:
2325 break;
2326 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002327
2328 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002329 return 0;
2330
2331err_register:
2332 igb_release_hw_control(adapter);
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002333 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
Auke Kok9d5c8242008-01-24 02:22:38 -08002334err_eeprom:
2335 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002336 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002337
2338 if (hw->flash_address)
2339 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002340err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002341 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002342 iounmap(hw->hw_addr);
2343err_ioremap:
2344 free_netdev(netdev);
2345err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002346 pci_release_selected_regions(pdev,
2347 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002348err_pci_reg:
2349err_dma:
2350 pci_disable_device(pdev);
2351 return err;
2352}
2353
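One detail worth calling out from igb_probe() is the DMA mask negotiation: the driver first asks for 64-bit streaming and coherent DMA and only falls back to 32-bit masks if that fails. A condensed sketch of the pattern, wrapped in a hypothetical helper name:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* hypothetical helper illustrating the probe-time DMA mask fallback */
static int ex_configure_dma(struct pci_dev *pdev, int *using_dac)
{
	int err;

	/* prefer full 64-bit DMA (descriptor/buffer addressing above 4GB) */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
		return 0;
	}

	/* fall back to 32-bit masks for both streaming and coherent DMA */
	*using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
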
Greg Rosefa44f2f2013-01-17 01:03:06 -08002354#ifdef CONFIG_PCI_IOV
2355static int igb_disable_sriov(struct pci_dev *pdev)
2356{
2357 struct net_device *netdev = pci_get_drvdata(pdev);
2358 struct igb_adapter *adapter = netdev_priv(netdev);
2359 struct e1000_hw *hw = &adapter->hw;
2360
2361 /* reclaim resources allocated to VFs */
2362 if (adapter->vf_data) {
2363 /* disable iov and allow time for transactions to clear */
2364 if (igb_vfs_are_assigned(adapter)) {
2365 dev_warn(&pdev->dev,
2366 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
2367 return -EPERM;
2368 } else {
2369 pci_disable_sriov(pdev);
2370 msleep(500);
2371 }
2372
2373 kfree(adapter->vf_data);
2374 adapter->vf_data = NULL;
2375 adapter->vfs_allocated_count = 0;
2376 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2377 wrfl();
2378 msleep(100);
2379 dev_info(&pdev->dev, "IOV Disabled\n");
2380
2381 /* Re-enable DMA Coalescing flag since IOV is turned off */
2382 adapter->flags |= IGB_FLAG_DMAC;
2383 }
2384
2385 return 0;
2386}
2387
2388static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
2389{
2390 struct net_device *netdev = pci_get_drvdata(pdev);
2391 struct igb_adapter *adapter = netdev_priv(netdev);
2392 int old_vfs = pci_num_vf(pdev);
2393 int err = 0;
2394 int i;
2395
2396 if (!num_vfs)
2397 goto out;
2398 else if (old_vfs && old_vfs == num_vfs)
2399 goto out;
2400 else if (old_vfs && old_vfs != num_vfs)
2401 err = igb_disable_sriov(pdev);
2402
2403 if (err)
2404 goto out;
2405
2406 if (num_vfs > 7) {
2407 err = -EPERM;
2408 goto out;
2409 }
2410
2411 adapter->vfs_allocated_count = num_vfs;
2412
2413 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2414 sizeof(struct vf_data_storage), GFP_KERNEL);
2415
2416 /* if allocation failed then we do not support SR-IOV */
2417 if (!adapter->vf_data) {
2418 adapter->vfs_allocated_count = 0;
2419 dev_err(&pdev->dev,
2420 "Unable to allocate memory for VF Data Storage\n");
2421 err = -ENOMEM;
2422 goto out;
2423 }
2424
2425 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
2426 if (err)
2427 goto err_out;
2428
2429 dev_info(&pdev->dev, "%d VFs allocated\n",
2430 adapter->vfs_allocated_count);
2431 for (i = 0; i < adapter->vfs_allocated_count; i++)
2432 igb_vf_configure(adapter, i);
2433
2434 /* DMA Coalescing is not supported in IOV mode. */
2435 adapter->flags &= ~IGB_FLAG_DMAC;
2436 goto out;
2437
2438err_out:
2439 kfree(adapter->vf_data);
2440 adapter->vf_data = NULL;
2441 adapter->vfs_allocated_count = 0;
2442out:
2443 return err;
2444}
2445
2446#endif
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002447/*
2448 * igb_remove_i2c - Cleanup I2C interface
2449 * @adapter: pointer to adapter structure
2450 *
2451 */
2452static void igb_remove_i2c(struct igb_adapter *adapter)
2453{
2454
2455 /* free the adapter bus structure */
2456 i2c_del_adapter(&adapter->i2c_adap);
2457}
2458
Auke Kok9d5c8242008-01-24 02:22:38 -08002459/**
2460 * igb_remove - Device Removal Routine
2461 * @pdev: PCI device information struct
2462 *
2463 * igb_remove is called by the PCI subsystem to alert the driver
 2464 * that it should release a PCI device. This could be caused by a
2465 * Hot-Plug event, or because the driver is going to be removed from
2466 * memory.
2467 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05002468static void igb_remove(struct pci_dev *pdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08002469{
2470 struct net_device *netdev = pci_get_drvdata(pdev);
2471 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002472 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002473
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002474 pm_runtime_get_noresume(&pdev->dev);
Carolyn Wybornye4288932012-12-07 03:01:42 +00002475#ifdef CONFIG_IGB_HWMON
2476 igb_sysfs_exit(adapter);
2477#endif
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00002478 igb_remove_i2c(adapter);
Matthew Vicka79f4f82012-08-10 05:40:44 +00002479 igb_ptp_stop(adapter);
Tejun Heo760141a2010-12-12 16:45:14 +01002480 /*
2481 * The watchdog timer may be rescheduled, so explicitly
2482 * disable watchdog from being rescheduled.
2483 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002484 set_bit(__IGB_DOWN, &adapter->state);
2485 del_timer_sync(&adapter->watchdog_timer);
2486 del_timer_sync(&adapter->phy_info_timer);
2487
Tejun Heo760141a2010-12-12 16:45:14 +01002488 cancel_work_sync(&adapter->reset_task);
2489 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002490
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002491#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002492 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002493 dev_info(&pdev->dev, "DCA disabled\n");
2494 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002495 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002496 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002497 }
2498#endif
2499
Auke Kok9d5c8242008-01-24 02:22:38 -08002500 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2501 * would have already happened in close and is redundant. */
2502 igb_release_hw_control(adapter);
2503
2504 unregister_netdev(netdev);
2505
Alexander Duyck047e0032009-10-27 15:49:27 +00002506 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002507
Alexander Duyck37680112009-02-19 20:40:30 -08002508#ifdef CONFIG_PCI_IOV
Greg Rosefa44f2f2013-01-17 01:03:06 -08002509 igb_disable_sriov(pdev);
Alexander Duyck37680112009-02-19 20:40:30 -08002510#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002511
Alexander Duyck28b07592009-02-06 23:20:31 +00002512 iounmap(hw->hw_addr);
2513 if (hw->flash_address)
2514 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002515 pci_release_selected_regions(pdev,
2516 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002517
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002518 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002519 free_netdev(netdev);
2520
Frans Pop19d5afd2009-10-02 10:04:12 -07002521 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002522
Auke Kok9d5c8242008-01-24 02:22:38 -08002523 pci_disable_device(pdev);
2524}
2525
2526/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002527 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2528 * @adapter: board private structure to initialize
2529 *
2530 * This function initializes the vf specific data storage and then attempts to
2531 * allocate the VFs. The reason for ordering it this way is because it is much
2532 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2533 * the memory for the VFs.
2534 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05002535static void igb_probe_vfs(struct igb_adapter *adapter)
Alexander Duycka6b623e2009-10-27 23:47:53 +00002536{
2537#ifdef CONFIG_PCI_IOV
2538 struct pci_dev *pdev = adapter->pdev;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002539 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002540
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002541 /* Virtualization features not supported on i210 family. */
2542 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2543 return;
2544
Greg Rosefa44f2f2013-01-17 01:03:06 -08002545 pci_sriov_set_totalvfs(pdev, 7);
Alex Williamsond5e51a12013-03-13 15:50:29 +00002546 igb_enable_sriov(pdev, max_vfs);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002547
Alexander Duycka6b623e2009-10-27 23:47:53 +00002548#endif /* CONFIG_PCI_IOV */
2549}
2550
Greg Rosefa44f2f2013-01-17 01:03:06 -08002551static void igb_init_queue_configuration(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002552{
2553 struct e1000_hw *hw = &adapter->hw;
Matthew Vick374a5422012-05-18 04:54:58 +00002554 u32 max_rss_queues;
Auke Kok9d5c8242008-01-24 02:22:38 -08002555
Matthew Vick374a5422012-05-18 04:54:58 +00002556 /* Determine the maximum number of RSS queues supported. */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002557 switch (hw->mac.type) {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002558 case e1000_i211:
Matthew Vick374a5422012-05-18 04:54:58 +00002559 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002560 break;
Matthew Vick374a5422012-05-18 04:54:58 +00002561 case e1000_82575:
2562 case e1000_i210:
2563 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2564 break;
2565 case e1000_i350:
2566 /* I350 cannot do RSS and SR-IOV at the same time */
2567 if (!!adapter->vfs_allocated_count) {
2568 max_rss_queues = 1;
2569 break;
2570 }
2571 /* fall through */
2572 case e1000_82576:
2573 if (!!adapter->vfs_allocated_count) {
2574 max_rss_queues = 2;
2575 break;
2576 }
2577 /* fall through */
2578 case e1000_82580:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002579 default:
Matthew Vick374a5422012-05-18 04:54:58 +00002580 max_rss_queues = IGB_MAX_RX_QUEUES;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002581 break;
2582 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002583
Matthew Vick374a5422012-05-18 04:54:58 +00002584 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2585
2586 /* Determine if we need to pair queues. */
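	/* "Pairing" places the Tx and Rx queues with the same index on a
	 * single q_vector, so the pair shares one MSI-X vector instead of
	 * consuming two.
	 */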
2587 switch (hw->mac.type) {
2588 case e1000_82575:
2589 case e1000_i211:
2590 /* Device supports enough interrupts without queue pairing. */
2591 break;
2592 case e1000_82576:
2593 /*
2594 * If VFs are going to be allocated with RSS queues then we
2595 * should pair the queues in order to conserve interrupts due
2596 * to limited supply.
2597 */
2598 if ((adapter->rss_queues > 1) &&
2599 (adapter->vfs_allocated_count > 6))
2600 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2601 /* fall through */
2602 case e1000_82580:
2603 case e1000_i350:
2604 case e1000_i210:
2605 default:
2606 /*
2607 * If rss_queues > half of max_rss_queues, pair the queues in
2608 * order to conserve interrupts due to limited supply.
2609 */
2610 if (adapter->rss_queues > (max_rss_queues / 2))
2611 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2612 break;
2613 }
Greg Rosefa44f2f2013-01-17 01:03:06 -08002614}
2615
2616/**
2617 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2618 * @adapter: board private structure to initialize
2619 *
2620 * igb_sw_init initializes the Adapter private data structure.
2621 * Fields are initialized based on PCI device information and
2622 * OS network device settings (MTU size).
2623 **/
2624static int igb_sw_init(struct igb_adapter *adapter)
2625{
2626 struct e1000_hw *hw = &adapter->hw;
2627 struct net_device *netdev = adapter->netdev;
2628 struct pci_dev *pdev = adapter->pdev;
2629
2630 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2631
2632 /* set default ring sizes */
2633 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2634 adapter->rx_ring_count = IGB_DEFAULT_RXD;
2635
2636 /* set default ITR values */
2637 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2638 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2639
2640 /* set default work limits */
2641 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2642
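	/* With the default 1500 byte MTU, max_frame_size works out to
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes;
	 * min_frame_size is the usual 64 byte Ethernet minimum (ETH_ZLEN + FCS).
	 */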
2643 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2644 VLAN_HLEN;
2645 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2646
2647 spin_lock_init(&adapter->stats64_lock);
2648#ifdef CONFIG_PCI_IOV
2649 switch (hw->mac.type) {
2650 case e1000_82576:
2651 case e1000_i350:
2652 if (max_vfs > 7) {
2653 dev_warn(&pdev->dev,
2654 "Maximum of 7 VFs per PF, using max\n");
Alex Williamsond0f63ac2013-03-13 15:50:24 +00002655 max_vfs = adapter->vfs_allocated_count = 7;
Greg Rosefa44f2f2013-01-17 01:03:06 -08002656 } else
2657 adapter->vfs_allocated_count = max_vfs;
2658 if (adapter->vfs_allocated_count)
2659 dev_warn(&pdev->dev,
2660 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
2661 break;
2662 default:
2663 break;
2664 }
2665#endif /* CONFIG_PCI_IOV */
2666
2667 igb_init_queue_configuration(adapter);
Alexander Duycka99955f2009-11-12 18:37:19 +00002668
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002669 /* Setup and initialize a copy of the hw vlan table array */
Joe Perchesb2adaca2013-02-03 17:43:58 +00002670 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
2671 GFP_ATOMIC);
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002672
Alexander Duycka6b623e2009-10-27 23:47:53 +00002673 /* This call may decrease the number of queues */
Stefan Assmann53c7d062012-12-04 06:00:12 +00002674 if (igb_init_interrupt_scheme(adapter, true)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002675 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2676 return -ENOMEM;
2677 }
2678
Alexander Duycka6b623e2009-10-27 23:47:53 +00002679 igb_probe_vfs(adapter);
2680
Auke Kok9d5c8242008-01-24 02:22:38 -08002681 /* Explicitly disable IRQ since the NIC can be in any state. */
2682 igb_irq_disable(adapter);
2683
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002684 if (hw->mac.type >= e1000_i350)
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002685 adapter->flags &= ~IGB_FLAG_DMAC;
2686
Auke Kok9d5c8242008-01-24 02:22:38 -08002687 set_bit(__IGB_DOWN, &adapter->state);
2688 return 0;
2689}
2690
2691/**
2692 * igb_open - Called when a network interface is made active
2693 * @netdev: network interface device structure
2694 *
2695 * Returns 0 on success, negative value on failure
2696 *
2697 * The open entry point is called when a network interface is made
2698 * active by the system (IFF_UP). At this point all resources needed
2699 * for transmit and receive operations are allocated, the interrupt
2700 * handler is registered with the OS, the watchdog timer is started,
2701 * and the stack is notified that the interface is ready.
2702 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002703static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08002704{
2705 struct igb_adapter *adapter = netdev_priv(netdev);
2706 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002707 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002708 int err;
2709 int i;
2710
2711 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002712 if (test_bit(__IGB_TESTING, &adapter->state)) {
2713 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08002714 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002715 }
2716
2717 if (!resuming)
2718 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002719
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002720 netif_carrier_off(netdev);
2721
Auke Kok9d5c8242008-01-24 02:22:38 -08002722 /* allocate transmit descriptors */
2723 err = igb_setup_all_tx_resources(adapter);
2724 if (err)
2725 goto err_setup_tx;
2726
2727 /* allocate receive descriptors */
2728 err = igb_setup_all_rx_resources(adapter);
2729 if (err)
2730 goto err_setup_rx;
2731
Nick Nunley88a268c2010-02-17 01:01:59 +00002732 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002733
Auke Kok9d5c8242008-01-24 02:22:38 -08002734 /* before we allocate an interrupt, we must be ready to handle it.
2735 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2736 * as soon as we call pci_request_irq, so we have to set up our
2737 * clean_rx handler before we do so. */
2738 igb_configure(adapter);
2739
2740 err = igb_request_irq(adapter);
2741 if (err)
2742 goto err_req_irq;
2743
Alexander Duyck0c2cc022012-09-25 00:31:22 +00002744 /* Notify the stack of the actual queue counts. */
2745 err = netif_set_real_num_tx_queues(adapter->netdev,
2746 adapter->num_tx_queues);
2747 if (err)
2748 goto err_set_queues;
2749
2750 err = netif_set_real_num_rx_queues(adapter->netdev,
2751 adapter->num_rx_queues);
2752 if (err)
2753 goto err_set_queues;
2754
Auke Kok9d5c8242008-01-24 02:22:38 -08002755 /* From here on the code is the same as igb_up() */
2756 clear_bit(__IGB_DOWN, &adapter->state);
2757
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002758 for (i = 0; i < adapter->num_q_vectors; i++)
2759 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002760
2761 /* Clear any pending interrupts. */
2762 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002763
2764 igb_irq_enable(adapter);
2765
Alexander Duyckd4960302009-10-27 15:53:45 +00002766 /* notify VFs that reset has been completed */
2767 if (adapter->vfs_allocated_count) {
2768 u32 reg_data = rd32(E1000_CTRL_EXT);
2769 reg_data |= E1000_CTRL_EXT_PFRSTD;
2770 wr32(E1000_CTRL_EXT, reg_data);
2771 }
2772
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002773 netif_tx_start_all_queues(netdev);
2774
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002775 if (!resuming)
2776 pm_runtime_put(&pdev->dev);
2777
Alexander Duyck25568a52009-10-27 23:49:59 +00002778 /* start the watchdog. */
2779 hw->mac.get_link_status = 1;
2780 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002781
2782 return 0;
2783
Alexander Duyck0c2cc022012-09-25 00:31:22 +00002784err_set_queues:
2785 igb_free_irq(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002786err_req_irq:
2787 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002788 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002789 igb_free_all_rx_resources(adapter);
2790err_setup_rx:
2791 igb_free_all_tx_resources(adapter);
2792err_setup_tx:
2793 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002794 if (!resuming)
2795 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002796
2797 return err;
2798}
2799
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002800static int igb_open(struct net_device *netdev)
2801{
2802 return __igb_open(netdev, false);
2803}
2804
Auke Kok9d5c8242008-01-24 02:22:38 -08002805/**
2806 * igb_close - Disables a network interface
2807 * @netdev: network interface device structure
2808 *
2809 * Returns 0, this is not allowed to fail
2810 *
2811 * The close entry point is called when an interface is de-activated
2812 * by the OS. The hardware is still under the driver's control, but
2813 * needs to be disabled. A global MAC reset is issued to stop the
2814 * hardware, and all transmit and receive resources are freed.
2815 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002816static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08002817{
2818 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002819 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002820
2821 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08002822
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002823 if (!suspending)
2824 pm_runtime_get_sync(&pdev->dev);
2825
2826 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002827 igb_free_irq(adapter);
2828
2829 igb_free_all_tx_resources(adapter);
2830 igb_free_all_rx_resources(adapter);
2831
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002832 if (!suspending)
2833 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002834 return 0;
2835}
2836
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002837static int igb_close(struct net_device *netdev)
2838{
2839 return __igb_close(netdev, false);
2840}
2841
Auke Kok9d5c8242008-01-24 02:22:38 -08002842/**
2843 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002844 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2845 *
2846 * Return 0 on success, negative on failure
2847 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002848int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002849{
Alexander Duyck59d71982010-04-27 13:09:25 +00002850 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002851 int size;
2852
Alexander Duyck06034642011-08-26 07:44:22 +00002853 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002854
2855 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002856 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002857 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002858
2859 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002860 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002861 tx_ring->size = ALIGN(tx_ring->size, 4096);
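	/* e.g. the default 256 descriptors * 16 bytes per advanced Tx
	 * descriptor is exactly one 4K page, so no rounding is needed there.
	 */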
2862
Alexander Duyck5536d212012-09-25 00:31:17 +00002863 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2864 &tx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002865 if (!tx_ring->desc)
2866 goto err;
2867
Auke Kok9d5c8242008-01-24 02:22:38 -08002868 tx_ring->next_to_use = 0;
2869 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002870
Auke Kok9d5c8242008-01-24 02:22:38 -08002871 return 0;
2872
2873err:
Alexander Duyck06034642011-08-26 07:44:22 +00002874 vfree(tx_ring->tx_buffer_info);
Alexander Duyckf33005a2012-09-13 06:27:55 +00002875 tx_ring->tx_buffer_info = NULL;
2876 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002877 return -ENOMEM;
2878}
2879
2880/**
2881 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2882 * (Descriptors) for all queues
2883 * @adapter: board private structure
2884 *
2885 * Return 0 on success, negative on failure
2886 **/
2887static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2888{
Alexander Duyck439705e2009-10-27 23:49:20 +00002889 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002890 int i, err = 0;
2891
2892 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002893 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002894 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002895 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002896 "Allocation for Tx Queue %u failed\n", i);
2897 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002898 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002899 break;
2900 }
2901 }
2902
2903 return err;
2904}
2905
2906/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002907 * igb_setup_tctl - configure the transmit control registers
2908 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002909 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002910void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002911{
Auke Kok9d5c8242008-01-24 02:22:38 -08002912 struct e1000_hw *hw = &adapter->hw;
2913 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002914
Alexander Duyck85b430b2009-10-27 15:50:29 +00002915 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2916 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002917
2918 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002919 tctl = rd32(E1000_TCTL);
2920 tctl &= ~E1000_TCTL_CT;
2921 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2922 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2923
2924 igb_config_collision_dist(hw);
2925
Auke Kok9d5c8242008-01-24 02:22:38 -08002926 /* Enable transmits */
2927 tctl |= E1000_TCTL_EN;
2928
2929 wr32(E1000_TCTL, tctl);
2930}
2931
2932/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002933 * igb_configure_tx_ring - Configure transmit ring after Reset
2934 * @adapter: board private structure
2935 * @ring: tx ring to configure
2936 *
2937 * Configure a transmit ring after a reset.
2938 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002939void igb_configure_tx_ring(struct igb_adapter *adapter,
2940 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002941{
2942 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002943 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002944 u64 tdba = ring->dma;
2945 int reg_idx = ring->reg_idx;
2946
2947 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002948 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002949 wrfl();
2950 mdelay(10);
2951
2952 wr32(E1000_TDLEN(reg_idx),
2953 ring->count * sizeof(union e1000_adv_tx_desc));
2954 wr32(E1000_TDBAL(reg_idx),
2955 tdba & 0x00000000ffffffffULL);
2956 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2957
Alexander Duyckfce99e32009-10-27 15:51:27 +00002958 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002959 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002960 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002961
2962 txdctl |= IGB_TX_PTHRESH;
2963 txdctl |= IGB_TX_HTHRESH << 8;
2964 txdctl |= IGB_TX_WTHRESH << 16;
2965
2966 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2967 wr32(E1000_TXDCTL(reg_idx), txdctl);
2968}
2969
2970/**
2971 * igb_configure_tx - Configure transmit Unit after Reset
2972 * @adapter: board private structure
2973 *
2974 * Configure the Tx unit of the MAC after a reset.
2975 **/
2976static void igb_configure_tx(struct igb_adapter *adapter)
2977{
2978 int i;
2979
2980 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002981 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002982}
2983
2984/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002985 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002986 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2987 *
2988 * Returns 0 on success, negative on failure
2989 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002990int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002991{
Alexander Duyck59d71982010-04-27 13:09:25 +00002992 struct device *dev = rx_ring->dev;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002993 int size;
Auke Kok9d5c8242008-01-24 02:22:38 -08002994
Alexander Duyck06034642011-08-26 07:44:22 +00002995 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyckf33005a2012-09-13 06:27:55 +00002996
2997 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002998 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002999 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08003000
Auke Kok9d5c8242008-01-24 02:22:38 -08003001 /* Round up to nearest 4K */
Alexander Duyckf33005a2012-09-13 06:27:55 +00003002 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08003003 rx_ring->size = ALIGN(rx_ring->size, 4096);
3004
Alexander Duyck5536d212012-09-25 00:31:17 +00003005 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3006 &rx_ring->dma, GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08003007 if (!rx_ring->desc)
3008 goto err;
3009
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003010 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003011 rx_ring->next_to_clean = 0;
3012 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003013
Auke Kok9d5c8242008-01-24 02:22:38 -08003014 return 0;
3015
3016err:
Alexander Duyck06034642011-08-26 07:44:22 +00003017 vfree(rx_ring->rx_buffer_info);
3018 rx_ring->rx_buffer_info = NULL;
Alexander Duyckf33005a2012-09-13 06:27:55 +00003019 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08003020 return -ENOMEM;
3021}
3022
3023/**
3024 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3025 * (Descriptors) for all queues
3026 * @adapter: board private structure
3027 *
3028 * Return 0 on success, negative on failure
3029 **/
3030static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3031{
Alexander Duyck439705e2009-10-27 23:49:20 +00003032 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003033 int i, err = 0;
3034
3035 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003036 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003037 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00003038 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08003039 "Allocation for Rx Queue %u failed\n", i);
3040 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00003041 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003042 break;
3043 }
3044 }
3045
3046 return err;
3047}
3048
3049/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00003050 * igb_setup_mrqc - configure the multiple receive queue control registers
3051 * @adapter: Board private structure
3052 **/
3053static void igb_setup_mrqc(struct igb_adapter *adapter)
3054{
3055 struct e1000_hw *hw = &adapter->hw;
3056 u32 mrqc, rxcsum;
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003057 u32 j, num_rx_queues, shift = 0;
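	/* Fixed, well-distributed 40-byte RSS key; using a constant key keeps
	 * the hash (and therefore the flow-to-queue mapping) reproducible
	 * across resets and driver reloads.
	 */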
Alexander Duycka57fe232012-09-13 06:28:16 +00003058 static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
3059 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
3060 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
3061 0xFA01ACBE };
Alexander Duyck06cf2662009-10-27 15:53:25 +00003062
3063 /* Fill out hash function seeds */
Alexander Duycka57fe232012-09-13 06:28:16 +00003064 for (j = 0; j < 10; j++)
3065 wr32(E1000_RSSRK(j), rsskey[j]);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003066
Alexander Duycka99955f2009-11-12 18:37:19 +00003067 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003068
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003069 switch (hw->mac.type) {
3070 case e1000_82575:
3071 shift = 6;
3072 break;
3073 case e1000_82576:
3074 /* 82576 supports 2 RSS queues for SR-IOV */
3075 if (adapter->vfs_allocated_count) {
Alexander Duyck06cf2662009-10-27 15:53:25 +00003076 shift = 3;
3077 num_rx_queues = 2;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003078 }
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003079 break;
3080 default:
3081 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003082 }
3083
Alexander Duyck797fd4b2012-09-13 06:28:11 +00003084 /*
3085 * Populate the indirection table 4 entries at a time. To do this
3086 * we are generating the results for n and n+2 and then interleaving
3087	 * those with the results for n+1 and n+3.
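	 * For example, with num_rx_queues = 4 and shift = 0 this works out to
	 * entry i = (i * 4) / 128, i.e. RETA entries 0-31 steer to queue 0,
	 * entries 32-63 to queue 1, and so on.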
3088 */
3089 for (j = 0; j < 32; j++) {
3090 /* first pass generates n and n+2 */
3091 u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
3092 u32 reta = (base & 0x07800780) >> (7 - shift);
3093
3094 /* second pass generates n+1 and n+3 */
3095 base += 0x00010001 * num_rx_queues;
3096 reta |= (base & 0x07800780) << (1 + shift);
3097
3098 wr32(E1000_RETA(j), reta);
Alexander Duyck06cf2662009-10-27 15:53:25 +00003099 }
3100
3101 /*
3102 * Disable raw packet checksumming so that RSS hash is placed in
3103 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3104 * offloads as they are enabled by default
3105 */
3106 rxcsum = rd32(E1000_RXCSUM);
3107 rxcsum |= E1000_RXCSUM_PCSD;
3108
3109 if (adapter->hw.mac.type >= e1000_82576)
3110 /* Enable Receive Checksum Offload for SCTP */
3111 rxcsum |= E1000_RXCSUM_CRCOFL;
3112
3113 /* Don't need to set TUOFL or IPOFL, they default to 1 */
3114 wr32(E1000_RXCSUM, rxcsum);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003115
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003116 /* Generate RSS hash based on packet types, TCP/UDP
3117 * port numbers and/or IPv4/v6 src and dst addresses
3118 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003119 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3120 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3121 E1000_MRQC_RSS_FIELD_IPV6 |
3122 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3123 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003124
Akeem G. Abodunrin039454a2012-11-13 04:03:21 +00003125 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3126 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3127 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3128 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3129
Alexander Duyck06cf2662009-10-27 15:53:25 +00003130 /* If VMDq is enabled then we set the appropriate mode for that, else
3131 * we default to RSS so that an RSS hash is calculated per packet even
3132 * if we are only using one queue */
3133 if (adapter->vfs_allocated_count) {
3134 if (hw->mac.type > e1000_82575) {
3135 /* Set the default pool for the PF's first queue */
3136 u32 vtctl = rd32(E1000_VT_CTL);
3137 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3138 E1000_VT_CTL_DISABLE_DEF_POOL);
3139 vtctl |= adapter->vfs_allocated_count <<
3140 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3141 wr32(E1000_VT_CTL, vtctl);
3142 }
Alexander Duycka99955f2009-11-12 18:37:19 +00003143 if (adapter->rss_queues > 1)
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003144 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003145 else
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003146 mrqc |= E1000_MRQC_ENABLE_VMDQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003147 } else {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003148 if (hw->mac.type != e1000_i211)
3149 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003150 }
3151 igb_vmm_control(adapter);
3152
Alexander Duyck06cf2662009-10-27 15:53:25 +00003153 wr32(E1000_MRQC, mrqc);
3154}
3155
3156/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003157 * igb_setup_rctl - configure the receive control registers
3158 * @adapter: Board private structure
3159 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003160void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003161{
3162 struct e1000_hw *hw = &adapter->hw;
3163 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08003164
3165 rctl = rd32(E1000_RCTL);
3166
3167 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08003168 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08003169
Alexander Duyck69d728b2008-11-25 01:04:03 -08003170 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00003171 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08003172
Auke Kok87cb7e82008-07-08 15:08:29 -07003173 /*
3174 * enable stripping of CRC. It's unlikely this will break BMC
3175 * redirection as it did with e1000. Newer features require
3176 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003177 */
Auke Kok87cb7e82008-07-08 15:08:29 -07003178 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08003179
Alexander Duyck559e9c42009-10-27 23:52:50 +00003180 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08003181 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08003182
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003183 /* enable LPE to prevent packets larger than max_frame_size */
3184 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08003185
Alexander Duyck952f72a2009-10-27 15:51:07 +00003186 /* disable queue 0 to prevent tail write w/o re-config */
3187 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003188
Alexander Duycke1739522009-02-19 20:39:44 -08003189 /* Attention!!! For SR-IOV PF driver operations you must enable
3190 * queue drop for all VF and PF queues to prevent head of line blocking
3191 * if an un-trusted VF does not provide descriptors to hardware.
3192 */
3193 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08003194 /* set all queue drop enable bits */
3195 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08003196 }
3197
Ben Greear89eaefb2012-03-06 09:41:58 +00003198 /* This is useful for sniffing bad packets. */
3199 if (adapter->netdev->features & NETIF_F_RXALL) {
3200 /* UPE and MPE will be handled by normal PROMISC logic
3201	 * in igb_set_rx_mode */
3202 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3203 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3204 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3205
3206 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3207 E1000_RCTL_DPF | /* Allow filtered pause */
3208 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3209 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3210 * and that breaks VLANs.
3211 */
3212 }
3213
Auke Kok9d5c8242008-01-24 02:22:38 -08003214 wr32(E1000_RCTL, rctl);
3215}
3216
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003217static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3218 int vfn)
3219{
3220 struct e1000_hw *hw = &adapter->hw;
3221 u32 vmolr;
3222
3223	/* if it isn't the PF, check to see if VFs are enabled and
3224 * increase the size to support vlan tags */
3225 if (vfn < adapter->vfs_allocated_count &&
3226 adapter->vf_data[vfn].vlans_enabled)
3227 size += VLAN_TAG_SIZE;
3228
3229 vmolr = rd32(E1000_VMOLR(vfn));
3230 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3231 vmolr |= size | E1000_VMOLR_LPE;
3232 wr32(E1000_VMOLR(vfn), vmolr);
3233
3234 return 0;
3235}
3236
Auke Kok9d5c8242008-01-24 02:22:38 -08003237/**
Alexander Duycke1739522009-02-19 20:39:44 -08003238 * igb_rlpml_set - set maximum receive packet size
3239 * @adapter: board private structure
3240 *
3241 * Configure maximum receivable packet size.
3242 **/
3243static void igb_rlpml_set(struct igb_adapter *adapter)
3244{
Alexander Duyck153285f2011-08-26 07:43:32 +00003245 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003246 struct e1000_hw *hw = &adapter->hw;
3247 u16 pf_id = adapter->vfs_allocated_count;
3248
Alexander Duycke1739522009-02-19 20:39:44 -08003249 if (pf_id) {
3250 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00003251 /*
3252 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3253 * to our max jumbo frame size, in case we need to enable
3254 * jumbo frames on one of the rings later.
3255 * This will not pass over-length frames into the default
3256 * queue because it's gated by the VMOLR.RLPML.
3257 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003258 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003259 }
3260
3261 wr32(E1000_RLPML, max_frame_size);
3262}
3263
Williams, Mitch A8151d292010-02-10 01:44:24 +00003264static inline void igb_set_vmolr(struct igb_adapter *adapter,
3265 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003266{
3267 struct e1000_hw *hw = &adapter->hw;
3268 u32 vmolr;
3269
3270 /*
3271 * This register exists only on 82576 and newer so if we are older then
3272 * we should exit and do nothing
3273 */
3274 if (hw->mac.type < e1000_82576)
3275 return;
3276
3277 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003278 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3279 if (aupe)
3280 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3281 else
3282 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003283
3284 /* clear all bits that might not be set */
3285 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3286
Alexander Duycka99955f2009-11-12 18:37:19 +00003287 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003288 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3289 /*
3290 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3291 * multicast packets
3292 */
3293 if (vfn <= adapter->vfs_allocated_count)
3294 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3295
3296 wr32(E1000_VMOLR(vfn), vmolr);
3297}
3298
Alexander Duycke1739522009-02-19 20:39:44 -08003299/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003300 * igb_configure_rx_ring - Configure a receive ring after Reset
3301 * @adapter: board private structure
3302 * @ring: receive ring to be configured
3303 *
3304 * Configure the Rx unit of the MAC after a reset.
3305 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003306void igb_configure_rx_ring(struct igb_adapter *adapter,
3307 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003308{
3309 struct e1000_hw *hw = &adapter->hw;
3310 u64 rdba = ring->dma;
3311 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003312 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003313
3314 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003315 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003316
3317 /* Set DMA base address registers */
3318 wr32(E1000_RDBAL(reg_idx),
3319 rdba & 0x00000000ffffffffULL);
3320 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3321 wr32(E1000_RDLEN(reg_idx),
3322 ring->count * sizeof(union e1000_adv_rx_desc));
3323
3324 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003325 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003326 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003327 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003328
Alexander Duyck952f72a2009-10-27 15:51:07 +00003329 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003330 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyckde78d1f2012-09-25 00:31:12 +00003331 srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00003332 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
Alexander Duyck06218a82011-08-26 07:46:55 +00003333 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00003334 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003335 /* Only set Drop Enable if we are supporting multiple queues */
3336 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3337 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003338
3339 wr32(E1000_SRRCTL(reg_idx), srrctl);
3340
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003341 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003342 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003343
Alexander Duyck85b430b2009-10-27 15:50:29 +00003344 rxdctl |= IGB_RX_PTHRESH;
3345 rxdctl |= IGB_RX_HTHRESH << 8;
3346 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003347
3348 /* enable receive descriptor fetching */
3349 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003350 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3351}
3352
Alexander Duyck74e238e2013-02-02 05:07:11 +00003353static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
3354 struct igb_ring *rx_ring)
3355{
3356#define IGB_MAX_BUILD_SKB_SIZE \
3357 (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
3358 (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
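	/* Roughly: the largest frame that still fits in one IGB_RX_BUFSZ
	 * receive buffer once the skb_shared_info overhead, the standard
	 * NET_SKB_PAD/NET_IP_ALIGN headroom and a possible hardware
	 * timestamp header (IGB_TS_HDR_LEN) are subtracted; only such frames
	 * can take the build_skb receive path selected below.
	 */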
3359
3360 /* set build_skb flag */
3361 if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
3362 set_ring_build_skb_enabled(rx_ring);
3363 else
3364 clear_ring_build_skb_enabled(rx_ring);
3365}
3366
Alexander Duyck85b430b2009-10-27 15:50:29 +00003367/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003368 * igb_configure_rx - Configure receive Unit after Reset
3369 * @adapter: board private structure
3370 *
3371 * Configure the Rx unit of the MAC after a reset.
3372 **/
3373static void igb_configure_rx(struct igb_adapter *adapter)
3374{
Hannes Eder91075842009-02-18 19:36:04 -08003375 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003376
Alexander Duyck68d480c2009-10-05 06:33:08 +00003377 /* set UTA to appropriate mode */
3378 igb_set_uta(adapter);
3379
Alexander Duyck26ad9172009-10-05 06:32:49 +00003380 /* set the correct pool for the PF default MAC address in entry 0 */
3381 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3382 adapter->vfs_allocated_count);
3383
Alexander Duyck06cf2662009-10-27 15:53:25 +00003384 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3385 * the Base and Length of the Rx Descriptor Ring */
Alexander Duyck74e238e2013-02-02 05:07:11 +00003386 for (i = 0; i < adapter->num_rx_queues; i++) {
3387 struct igb_ring *rx_ring = adapter->rx_ring[i];
3388 igb_set_rx_buffer_len(adapter, rx_ring);
3389 igb_configure_rx_ring(adapter, rx_ring);
3390 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003391}
3392
3393/**
3394 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003395 * @tx_ring: Tx descriptor ring for a specific queue
3396 *
3397 * Free all transmit software resources
3398 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003399void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003400{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003401 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003402
Alexander Duyck06034642011-08-26 07:44:22 +00003403 vfree(tx_ring->tx_buffer_info);
3404 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003405
Alexander Duyck439705e2009-10-27 23:49:20 +00003406 /* if not set, then don't free */
3407 if (!tx_ring->desc)
3408 return;
3409
Alexander Duyck59d71982010-04-27 13:09:25 +00003410 dma_free_coherent(tx_ring->dev, tx_ring->size,
3411 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003412
3413 tx_ring->desc = NULL;
3414}
3415
3416/**
3417 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3418 * @adapter: board private structure
3419 *
3420 * Free all transmit software resources
3421 **/
3422static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3423{
3424 int i;
3425
3426 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003427 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003428}
3429
Alexander Duyckebe42d12011-08-26 07:45:09 +00003430void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3431 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003432{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003433 if (tx_buffer->skb) {
3434 dev_kfree_skb_any(tx_buffer->skb);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003435 if (dma_unmap_len(tx_buffer, len))
Alexander Duyckebe42d12011-08-26 07:45:09 +00003436 dma_unmap_single(ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003437 dma_unmap_addr(tx_buffer, dma),
3438 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00003439 DMA_TO_DEVICE);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003440 } else if (dma_unmap_len(tx_buffer, len)) {
Alexander Duyckebe42d12011-08-26 07:45:09 +00003441 dma_unmap_page(ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003442 dma_unmap_addr(tx_buffer, dma),
3443 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00003444 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003445 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003446 tx_buffer->next_to_watch = NULL;
3447 tx_buffer->skb = NULL;
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00003448 dma_unmap_len_set(tx_buffer, len, 0);
Alexander Duyckebe42d12011-08-26 07:45:09 +00003449 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003450}
3451
3452/**
3453 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003454 * @tx_ring: ring to be cleaned
3455 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003456static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003457{
Alexander Duyck06034642011-08-26 07:44:22 +00003458 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003459 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003460 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003461
Alexander Duyck06034642011-08-26 07:44:22 +00003462 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003463 return;
3464 /* Free all the Tx ring sk_buffs */
3465
3466 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003467 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003468 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003469 }
3470
John Fastabenddad8a3b2012-04-23 12:22:39 +00003471 netdev_tx_reset_queue(txring_txq(tx_ring));
3472
Alexander Duyck06034642011-08-26 07:44:22 +00003473 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3474 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003475
3476 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003477 memset(tx_ring->desc, 0, tx_ring->size);
3478
3479 tx_ring->next_to_use = 0;
3480 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003481}
3482
3483/**
3484 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3485 * @adapter: board private structure
3486 **/
3487static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3488{
3489 int i;
3490
3491 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003492 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003493}
3494
3495/**
3496 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003497 * @rx_ring: ring to clean the resources from
3498 *
3499 * Free all receive software resources
3500 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003501void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003502{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003503 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003504
Alexander Duyck06034642011-08-26 07:44:22 +00003505 vfree(rx_ring->rx_buffer_info);
3506 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003507
Alexander Duyck439705e2009-10-27 23:49:20 +00003508 /* if not set, then don't free */
3509 if (!rx_ring->desc)
3510 return;
3511
Alexander Duyck59d71982010-04-27 13:09:25 +00003512 dma_free_coherent(rx_ring->dev, rx_ring->size,
3513 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003514
3515 rx_ring->desc = NULL;
3516}
3517
3518/**
3519 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3520 * @adapter: board private structure
3521 *
3522 * Free all receive software resources
3523 **/
3524static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3525{
3526 int i;
3527
3528 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003529 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003530}
3531
3532/**
3533 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003534 * @rx_ring: ring to free buffers from
3535 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003536static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003537{
Auke Kok9d5c8242008-01-24 02:22:38 -08003538 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003539 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003540
Alexander Duyck1a1c2252012-09-25 00:30:52 +00003541 if (rx_ring->skb)
3542 dev_kfree_skb(rx_ring->skb);
3543 rx_ring->skb = NULL;
3544
Alexander Duyck06034642011-08-26 07:44:22 +00003545 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003546 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003547
Auke Kok9d5c8242008-01-24 02:22:38 -08003548 /* Free all the Rx ring sk_buffs */
3549 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003550 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003551
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003552 if (!buffer_info->page)
3553 continue;
3554
3555 dma_unmap_page(rx_ring->dev,
3556 buffer_info->dma,
3557 PAGE_SIZE,
3558 DMA_FROM_DEVICE);
3559 __free_page(buffer_info->page);
3560
Alexander Duyck1a1c2252012-09-25 00:30:52 +00003561 buffer_info->page = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003562 }
3563
Alexander Duyck06034642011-08-26 07:44:22 +00003564 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3565 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003566
3567 /* Zero out the descriptor ring */
3568 memset(rx_ring->desc, 0, rx_ring->size);
3569
Alexander Duyckcbc8e552012-09-25 00:31:02 +00003570 rx_ring->next_to_alloc = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003571 rx_ring->next_to_clean = 0;
3572 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003573}
3574
3575/**
3576 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3577 * @adapter: board private structure
3578 **/
3579static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3580{
3581 int i;
3582
3583 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003584 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003585}
3586
3587/**
3588 * igb_set_mac - Change the Ethernet Address of the NIC
3589 * @netdev: network interface device structure
3590 * @p: pointer to an address structure
3591 *
3592 * Returns 0 on success, negative on failure
3593 **/
3594static int igb_set_mac(struct net_device *netdev, void *p)
3595{
3596 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003597 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003598 struct sockaddr *addr = p;
3599
3600 if (!is_valid_ether_addr(addr->sa_data))
3601 return -EADDRNOTAVAIL;
3602
3603 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003604 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003605
Alexander Duyck26ad9172009-10-05 06:32:49 +00003606 /* set the correct pool for the new PF MAC address in entry 0 */
3607 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3608 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003609
Auke Kok9d5c8242008-01-24 02:22:38 -08003610 return 0;
3611}
3612
3613/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003614 * igb_write_mc_addr_list - write multicast addresses to MTA
3615 * @netdev: network interface device structure
3616 *
3617 * Writes multicast address list to the MTA hash table.
3618 * Returns: -ENOMEM on failure
3619 * 0 on no addresses written
3620 * X on writing X addresses to MTA
3621 **/
3622static int igb_write_mc_addr_list(struct net_device *netdev)
3623{
3624 struct igb_adapter *adapter = netdev_priv(netdev);
3625 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003626 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003627 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003628 int i;
3629
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003630 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003631 /* nothing to program, so clear mc list */
3632 igb_update_mc_addr_list(hw, NULL, 0);
3633 igb_restore_vf_multicasts(adapter);
3634 return 0;
3635 }
3636
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003637 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003638 if (!mta_list)
3639 return -ENOMEM;
3640
Alexander Duyck68d480c2009-10-05 06:33:08 +00003641 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003642 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003643 netdev_for_each_mc_addr(ha, netdev)
3644 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003645
Alexander Duyck68d480c2009-10-05 06:33:08 +00003646 igb_update_mc_addr_list(hw, mta_list, i);
3647 kfree(mta_list);
3648
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003649 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003650}
3651
3652/**
3653 * igb_write_uc_addr_list - write unicast addresses to RAR table
3654 * @netdev: network interface device structure
3655 *
3656 * Writes unicast address list to the RAR table.
3657 * Returns: -ENOMEM on failure/insufficient address space
3658 * 0 on no addresses written
3659 * X on writing X addresses to the RAR table
3660 **/
3661static int igb_write_uc_addr_list(struct net_device *netdev)
3662{
3663 struct igb_adapter *adapter = netdev_priv(netdev);
3664 struct e1000_hw *hw = &adapter->hw;
3665 unsigned int vfn = adapter->vfs_allocated_count;
3666 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
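	/* RAR entry 0 holds the PF's default MAC and the top entries are set
	 * aside for the VF MAC addresses, so only the remainder is available
	 * here for additional unicast filter addresses.
	 */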
3667 int count = 0;
3668
3669 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003670 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003671 return -ENOMEM;
3672
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003673 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003674 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003675
3676 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003677 if (!rar_entries)
3678 break;
3679 igb_rar_set_qsel(adapter, ha->addr,
3680 rar_entries--,
3681 vfn);
3682 count++;
3683 }
3684 }
3685 /* write the addresses in reverse order to avoid write combining */
3686 for (; rar_entries > 0 ; rar_entries--) {
3687 wr32(E1000_RAH(rar_entries), 0);
3688 wr32(E1000_RAL(rar_entries), 0);
3689 }
3690 wrfl();
3691
3692 return count;
3693}
3694
3695/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003696 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003697 * @netdev: network interface device structure
3698 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003699 * The set_rx_mode entry point is called whenever the unicast or multicast
3700 * address lists or the network interface flags are updated. This routine is
3701 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003702 * promiscuous mode, and all-multi behavior.
3703 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003704static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003705{
3706 struct igb_adapter *adapter = netdev_priv(netdev);
3707 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003708 unsigned int vfn = adapter->vfs_allocated_count;
3709 u32 rctl, vmolr = 0;
3710 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003711
3712 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003713 rctl = rd32(E1000_RCTL);
3714
Alexander Duyck68d480c2009-10-05 06:33:08 +00003715 /* clear the effected bits */
3716 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3717
Patrick McHardy746b9f02008-07-16 20:15:45 -07003718 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003719 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003720 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003721 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003722 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003723 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003724 vmolr |= E1000_VMOLR_MPME;
3725 } else {
3726 /*
3727	 * Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003728 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003729 * that we can at least receive multicast traffic
3730 */
3731 count = igb_write_mc_addr_list(netdev);
3732 if (count < 0) {
3733 rctl |= E1000_RCTL_MPE;
3734 vmolr |= E1000_VMOLR_MPME;
3735 } else if (count) {
3736 vmolr |= E1000_VMOLR_ROMPE;
3737 }
3738 }
3739 /*
3740 * Write addresses to available RAR registers, if there is not
3741 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003742 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003743 */
3744 count = igb_write_uc_addr_list(netdev);
3745 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003746 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003747 vmolr |= E1000_VMOLR_ROPE;
3748 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003749 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003750 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003751 wr32(E1000_RCTL, rctl);
3752
Alexander Duyck68d480c2009-10-05 06:33:08 +00003753 /*
3754 * In order to support SR-IOV and eventually VMDq it is necessary to set
3755 * the VMOLR to enable the appropriate modes. Without this workaround
3756 * we will have issues with VLAN tag stripping not being done for frames
3757 * that are only arriving because we are the default pool
3758 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003759 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003760 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003761
Alexander Duyck68d480c2009-10-05 06:33:08 +00003762 vmolr |= rd32(E1000_VMOLR(vfn)) &
3763 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3764 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003765 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003766}
3767
Greg Rose13800462010-11-06 02:08:26 +00003768static void igb_check_wvbr(struct igb_adapter *adapter)
3769{
3770 struct e1000_hw *hw = &adapter->hw;
3771 u32 wvbr = 0;
3772
3773 switch (hw->mac.type) {
3774 case e1000_82576:
3775 case e1000_i350:
3776 if (!(wvbr = rd32(E1000_WVBR)))
3777 return;
3778 break;
3779 default:
3780 break;
3781 }
3782
3783 adapter->wvbr |= wvbr;
3784}
3785
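/* Anti-spoof events for a given VF are reported in two WVBR bits 8
 * positions apart (presumably one per Tx queue of that VF's pool), hence
 * the staggered offset used when checking and clearing them below.
 */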
3786#define IGB_STAGGERED_QUEUE_OFFSET 8
3787
3788static void igb_spoof_check(struct igb_adapter *adapter)
3789{
3790 int j;
3791
3792 if (!adapter->wvbr)
3793 return;
3794
3795	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3796 if (adapter->wvbr & (1 << j) ||
3797 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3798 dev_warn(&adapter->pdev->dev,
3799 "Spoof event(s) detected on VF %d\n", j);
3800 adapter->wvbr &=
3801 ~((1 << j) |
3802 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3803 }
3804 }
3805}
3806
Auke Kok9d5c8242008-01-24 02:22:38 -08003807/* Need to wait a few seconds after link up to get diagnostic information from
3808 * the phy */
3809static void igb_update_phy_info(unsigned long data)
3810{
3811 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003812 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003813}
3814
3815/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003816 * igb_has_link - check shared code for link and determine up/down
3817 * @adapter: pointer to driver private info
3818 **/
Nick Nunley31455352010-02-17 01:01:21 +00003819bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003820{
3821 struct e1000_hw *hw = &adapter->hw;
3822 bool link_active = false;
3823 s32 ret_val = 0;
3824
3825 /* get_link_status is set on LSC (link status) interrupt or
3826 * rx sequence error interrupt. get_link_status will stay
3827 * false until the e1000_check_for_link establishes link
3828 * for copper adapters ONLY
3829 */
3830 switch (hw->phy.media_type) {
3831 case e1000_media_type_copper:
3832 if (hw->mac.get_link_status) {
3833 ret_val = hw->mac.ops.check_for_link(hw);
3834 link_active = !hw->mac.get_link_status;
3835 } else {
3836 link_active = true;
3837 }
3838 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003839 case e1000_media_type_internal_serdes:
3840 ret_val = hw->mac.ops.check_for_link(hw);
3841 link_active = hw->mac.serdes_has_link;
3842 break;
3843 default:
3844 case e1000_media_type_unknown:
3845 break;
3846 }
3847
3848 return link_active;
3849}
3850
Stefan Assmann563988d2011-04-05 04:27:15 +00003851static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3852{
3853 bool ret = false;
3854 u32 ctrl_ext, thstat;
3855
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003856 /* check for thermal sensor event on i350 copper only */
Stefan Assmann563988d2011-04-05 04:27:15 +00003857 if (hw->mac.type == e1000_i350) {
3858 thstat = rd32(E1000_THSTAT);
3859 ctrl_ext = rd32(E1000_CTRL_EXT);
3860
3861 if ((hw->phy.media_type == e1000_media_type_copper) &&
Akeem G. Abodunrin5c17a202013-01-29 10:15:31 +00003862 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
Stefan Assmann563988d2011-04-05 04:27:15 +00003863 ret = !!(thstat & event);
Stefan Assmann563988d2011-04-05 04:27:15 +00003864 }
3865
3866 return ret;
3867}
3868
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003869/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003870 * igb_watchdog - Timer Call-back
3871 * @data: pointer to adapter cast into an unsigned long
3872 **/
3873static void igb_watchdog(unsigned long data)
3874{
3875 struct igb_adapter *adapter = (struct igb_adapter *)data;
3876 /* Do the rest outside of interrupt context */
3877 schedule_work(&adapter->watchdog_task);
3878}
3879
3880static void igb_watchdog_task(struct work_struct *work)
3881{
3882 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003883 struct igb_adapter,
3884 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003885 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003886 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003887 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003888 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003889
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003890 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003891 if (link) {
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003892 /* Cancel scheduled suspend requests. */
3893 pm_runtime_resume(netdev->dev.parent);
3894
Auke Kok9d5c8242008-01-24 02:22:38 -08003895 if (!netif_carrier_ok(netdev)) {
3896 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003897 hw->mac.ops.get_speed_and_duplex(hw,
3898 &adapter->link_speed,
3899 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003900
3901 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003902 /* Links status message must follow this format */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003903 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3904 "Duplex, Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003905 netdev->name,
3906 adapter->link_speed,
3907 adapter->link_duplex == FULL_DUPLEX ?
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003908 "Full" : "Half",
3909 (ctrl & E1000_CTRL_TFCE) &&
3910 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3911 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3912 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
Auke Kok9d5c8242008-01-24 02:22:38 -08003913
Stefan Assmann563988d2011-04-05 04:27:15 +00003914 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003915 if (igb_thermal_sensor_event(hw,
3916 E1000_THSTAT_LINK_THROTTLE)) {
3917 netdev_info(netdev, "The network adapter link "
3918 "speed was downshifted because it "
3919 "overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003920 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003921
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003922 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003923 adapter->tx_timeout_factor = 1;
3924 switch (adapter->link_speed) {
3925 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003926 adapter->tx_timeout_factor = 14;
3927 break;
3928 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003929 /* maybe add some timeout factor ? */
3930 break;
3931 }
3932
3933 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003934
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003935 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003936 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003937
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003938 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003939 if (!test_bit(__IGB_DOWN, &adapter->state))
3940 mod_timer(&adapter->phy_info_timer,
3941 round_jiffies(jiffies + 2 * HZ));
3942 }
3943 } else {
3944 if (netif_carrier_ok(netdev)) {
3945 adapter->link_speed = 0;
3946 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003947
3948 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003949 if (igb_thermal_sensor_event(hw,
3950 E1000_THSTAT_PWR_DOWN)) {
3951 netdev_err(netdev, "The network adapter was "
3952 "stopped because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003953 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003954
Alexander Duyck527d47c2008-11-27 00:21:39 -08003955 /* Links status message must follow this format */
3956 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3957 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003958 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003959
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003960 igb_ping_all_vfs(adapter);
3961
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003962 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003963 if (!test_bit(__IGB_DOWN, &adapter->state))
3964 mod_timer(&adapter->phy_info_timer,
3965 round_jiffies(jiffies + 2 * HZ));
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003966
3967 pm_schedule_suspend(netdev->dev.parent,
3968 MSEC_PER_SEC * 5);
Auke Kok9d5c8242008-01-24 02:22:38 -08003969 }
3970 }
3971
Eric Dumazet12dcd862010-10-15 17:27:10 +00003972 spin_lock(&adapter->stats64_lock);
3973 igb_update_stats(adapter, &adapter->stats64);
3974 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003975
Alexander Duyckdbabb062009-11-12 18:38:16 +00003976 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003977 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003978 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003979 /* We've lost link, so the controller stops DMA,
3980 * but we've got queued Tx work that's never going
3981 * to get done, so reset controller to flush Tx.
3982 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003983 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3984 adapter->tx_timeout_count++;
3985 schedule_work(&adapter->reset_task);
3986 /* return immediately since reset is imminent */
3987 return;
3988 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003989 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003990
Alexander Duyckdbabb062009-11-12 18:38:16 +00003991 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00003992 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00003993 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003994
Auke Kok9d5c8242008-01-24 02:22:38 -08003995 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003996 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003997 u32 eics = 0;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00003998 for (i = 0; i < adapter->num_q_vectors; i++)
3999 eics |= adapter->q_vector[i]->eims_value;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07004000 wr32(E1000_EICS, eics);
4001 } else {
4002 wr32(E1000_ICS, E1000_ICS_RXDMT0);
4003 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004004
Greg Rose13800462010-11-06 02:08:26 +00004005 igb_spoof_check(adapter);
Matthew Vickfc580752012-12-13 07:20:35 +00004006 igb_ptp_rx_hang(adapter);
Greg Rose13800462010-11-06 02:08:26 +00004007
Auke Kok9d5c8242008-01-24 02:22:38 -08004008 /* Reset the timer */
4009 if (!test_bit(__IGB_DOWN, &adapter->state))
4010 mod_timer(&adapter->watchdog_timer,
4011 round_jiffies(jiffies + 2 * HZ));
4012}
4013
4014enum latency_range {
4015 lowest_latency = 0,
4016 low_latency = 1,
4017 bulk_latency = 2,
4018 latency_invalid = 255
4019};
4020
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004021/**
4022 * igb_update_ring_itr - update the dynamic ITR value based on packet size
4023 *
4024 * Stores a new ITR value based strictly on packet size. This
4025 * algorithm is less sophisticated than that used in igb_update_itr,
4026 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02004027 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004028 * were determined based on theoretical maximum wire speed and testing
4029 * data, in order to minimize response time while increasing bulk
4030 * throughput.
4031 * This functionality is controlled by the InterruptThrottleRate module
4032 * parameter (see igb_param.c)
4033 * NOTE: This function is called only when operating in a multiqueue
4034 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00004035 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004036 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00004037static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004038{
Alexander Duyck047e0032009-10-27 15:49:27 +00004039 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004040 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00004041 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004042 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004043
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004044 /* For non-gigabit speeds, just fix the interrupt rate at 4000
4045 * ints/sec - ITR timer value of 120 ticks.
4046 */
4047 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004048 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004049 goto set_itr_val;
4050 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004051
Alexander Duyck0ba82992011-08-26 07:45:47 +00004052 packets = q_vector->rx.total_packets;
4053 if (packets)
4054 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004055
Alexander Duyck0ba82992011-08-26 07:45:47 +00004056 packets = q_vector->tx.total_packets;
4057 if (packets)
4058 avg_wire_size = max_t(u32, avg_wire_size,
4059 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00004060
4061 /* if avg_wire_size isn't set no work was done */
4062 if (!avg_wire_size)
4063 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004064
4065 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4066 avg_wire_size += 24;
4067
4068 /* Don't starve jumbo frames */
4069 avg_wire_size = min(avg_wire_size, 3000);
4070
4071 /* Give a little boost to mid-size frames */
4072 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
4073 new_val = avg_wire_size / 3;
4074 else
4075 new_val = avg_wire_size / 2;
4076
Alexander Duyck0ba82992011-08-26 07:45:47 +00004077 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4078 if (new_val < IGB_20K_ITR &&
4079 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4080 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4081 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00004082
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004083set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00004084 if (new_val != q_vector->itr_val) {
4085 q_vector->itr_val = new_val;
4086 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004087 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004088clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004089 q_vector->rx.total_bytes = 0;
4090 q_vector->rx.total_packets = 0;
4091 q_vector->tx.total_bytes = 0;
4092 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004093}
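
/* Worked example with hypothetical numbers: suppose a queue vector saw
 * rx.total_packets = 150 and rx.total_bytes = 90000 since the last
 * update, with no Tx traffic. Then
 *
 *	avg_wire_size = 90000 / 150 + 24 = 624
 *
 * which falls in the 300..1200 mid-size band, so new_val = 624 / 3 = 208
 * and, provided that differs from the current itr_val, set_itr is
 * flagged so the value is written to EITR on the next interrupt.
 */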
4094
4095/**
4096 * igb_update_itr - update the dynamic ITR value based on statistics
4097 * Stores a new ITR value based on packets and byte
4098 * counts during the last interrupt. The advantage of per interrupt
4099 * computation is faster updates and more accurate ITR for the current
4100 * traffic pattern. Constants in this function were computed
4101 * based on theoretical maximum wire speed and thresholds were set based
4102 * on testing data as well as attempting to minimize response time
4103 * while increasing bulk throughput.
4104 * this functionality is controlled by the InterruptThrottleRate module
4105 * parameter (see igb_param.c)
4106 * NOTE: These calculations are only valid when operating in a single-
4107 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00004108 * @q_vector: pointer to q_vector
4109 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08004110 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00004111static void igb_update_itr(struct igb_q_vector *q_vector,
4112 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08004113{
Alexander Duyck0ba82992011-08-26 07:45:47 +00004114 unsigned int packets = ring_container->total_packets;
4115 unsigned int bytes = ring_container->total_bytes;
4116 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08004117
Alexander Duyck0ba82992011-08-26 07:45:47 +00004118 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08004119 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00004120 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08004121
Alexander Duyck0ba82992011-08-26 07:45:47 +00004122 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004123 case lowest_latency:
4124 /* handle TSO and jumbo frames */
4125 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00004126 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004127 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00004128 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004129 break;
4130 case low_latency: /* 50 usec aka 20000 ints/s */
4131 if (bytes > 10000) {
4132 /* this if handles the TSO accounting */
4133 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004134 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004135 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004136 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004137 } else if ((packets > 35)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004138 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004139 }
4140 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004141 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004142 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004143 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004144 }
4145 break;
4146 case bulk_latency: /* 250 usec aka 4000 ints/s */
4147 if (bytes > 25000) {
4148 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00004149 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00004150 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00004151 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08004152 }
4153 break;
4154 }
4155
Alexander Duyck0ba82992011-08-26 07:45:47 +00004156 /* clear work counters since we have the values we need */
4157 ring_container->total_bytes = 0;
4158 ring_container->total_packets = 0;
4159
4160 /* write updated itr to ring container */
4161 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08004162}
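
/* Worked example with hypothetical numbers: a ring container currently
 * in low_latency state that accumulated 20 packets totalling 30000
 * bytes takes the bytes > 10000 branch; 30000 / 20 = 1500 bytes per
 * packet exceeds the 1200-byte threshold, so itrval is demoted to
 * bulk_latency before the byte/packet counters are cleared.
 */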
4163
Alexander Duyck0ba82992011-08-26 07:45:47 +00004164static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004165{
Alexander Duyck0ba82992011-08-26 07:45:47 +00004166 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004167 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00004168 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004169
4170 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4171 if (adapter->link_speed != SPEED_1000) {
4172 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00004173 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08004174 goto set_itr_now;
4175 }
4176
Alexander Duyck0ba82992011-08-26 07:45:47 +00004177 igb_update_itr(q_vector, &q_vector->tx);
4178 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004179
Alexander Duyck0ba82992011-08-26 07:45:47 +00004180 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08004181
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004182 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00004183 if (current_itr == lowest_latency &&
4184 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4185 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07004186 current_itr = low_latency;
4187
Auke Kok9d5c8242008-01-24 02:22:38 -08004188 switch (current_itr) {
4189 /* counts and packets in update_itr are dependent on these numbers */
4190 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004191 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08004192 break;
4193 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004194 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08004195 break;
4196 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00004197 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08004198 break;
4199 default:
4200 break;
4201 }
4202
4203set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00004204 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004205 /* this attempts to bias the interrupt rate towards Bulk
4206 * by adding intermediate steps when interrupt rate is
4207 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00004208 new_itr = new_itr > q_vector->itr_val ?
4209 max((new_itr * q_vector->itr_val) /
4210 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00004211 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08004212 new_itr;
4213 /* Don't write the value here; it resets the adapter's
4214 * internal timer, and causes us to delay far longer than
4215 * we should between interrupts. Instead, we write the ITR
4216 * value at the beginning of the next interrupt so the timing
4217 * ends up being correct.
4218 */
Alexander Duyck047e0032009-10-27 15:49:27 +00004219 q_vector->itr_val = new_itr;
4220 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004221 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004222}
4223
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00004224static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4225 u32 type_tucmd, u32 mss_l4len_idx)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004226{
4227 struct e1000_adv_tx_context_desc *context_desc;
4228 u16 i = tx_ring->next_to_use;
4229
4230 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4231
4232 i++;
4233 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4234
4235 /* set bits to identify this as an advanced context descriptor */
4236 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4237
4238 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004239 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004240 mss_l4len_idx |= tx_ring->reg_idx << 4;
4241
4242 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4243 context_desc->seqnum_seed = 0;
4244 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4245 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4246}
4247
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004248static int igb_tso(struct igb_ring *tx_ring,
4249 struct igb_tx_buffer *first,
4250 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004251{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004252 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004253 u32 vlan_macip_lens, type_tucmd;
4254 u32 mss_l4len_idx, l4len;
4255
Alexander Duycked6aa102012-11-13 04:03:22 +00004256 if (skb->ip_summed != CHECKSUM_PARTIAL)
4257 return 0;
4258
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004259 if (!skb_is_gso(skb))
4260 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004261
4262 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004263 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004264 if (err)
4265 return err;
4266 }
4267
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004268 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4269 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004270
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004271 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004272 struct iphdr *iph = ip_hdr(skb);
4273 iph->tot_len = 0;
4274 iph->check = 0;
4275 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4276 iph->daddr, 0,
4277 IPPROTO_TCP,
4278 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004279 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004280 first->tx_flags |= IGB_TX_FLAGS_TSO |
4281 IGB_TX_FLAGS_CSUM |
4282 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004283 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004284 ipv6_hdr(skb)->payload_len = 0;
4285 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4286 &ipv6_hdr(skb)->daddr,
4287 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004288 first->tx_flags |= IGB_TX_FLAGS_TSO |
4289 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004290 }
4291
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004292 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004293 l4len = tcp_hdrlen(skb);
4294 *hdr_len = skb_transport_offset(skb) + l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08004295
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004296 /* update gso size and bytecount with header size */
4297 first->gso_segs = skb_shinfo(skb)->gso_segs;
4298 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4299
Auke Kok9d5c8242008-01-24 02:22:38 -08004300 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004301 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4302 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004303
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004304 /* VLAN MACLEN IPLEN */
4305 vlan_macip_lens = skb_network_header_len(skb);
4306 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004307 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004308
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004309 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004310
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004311 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004312}
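
/* Worked example with assumed header sizes: for a TSO TCP/IPv4 frame
 * with a 14-byte Ethernet header, a 20-byte IP header and a 32-byte TCP
 * header, l4len = 32 and *hdr_len = 14 + 20 + 32 = 66. With
 * gso_segs = 10, first->bytecount grows by (10 - 1) * 66 = 594 bytes of
 * replicated headers, and the context descriptor written above carries
 * the L4 length and gso_size in mss_l4len_idx.
 */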
4313
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004314static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004315{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004316 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004317 u32 vlan_macip_lens = 0;
4318 u32 mss_l4len_idx = 0;
4319 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004320
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004321 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004322 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4323 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004324 } else {
4325 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004326 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004327 case __constant_htons(ETH_P_IP):
4328 vlan_macip_lens |= skb_network_header_len(skb);
4329 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4330 l4_hdr = ip_hdr(skb)->protocol;
4331 break;
4332 case __constant_htons(ETH_P_IPV6):
4333 vlan_macip_lens |= skb_network_header_len(skb);
4334 l4_hdr = ipv6_hdr(skb)->nexthdr;
4335 break;
4336 default:
4337 if (unlikely(net_ratelimit())) {
4338 dev_warn(tx_ring->dev,
4339 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004340 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004341 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004342 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004343 }
4344
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004345 switch (l4_hdr) {
4346 case IPPROTO_TCP:
4347 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4348 mss_l4len_idx = tcp_hdrlen(skb) <<
4349 E1000_ADVTXD_L4LEN_SHIFT;
4350 break;
4351 case IPPROTO_SCTP:
4352 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4353 mss_l4len_idx = sizeof(struct sctphdr) <<
4354 E1000_ADVTXD_L4LEN_SHIFT;
4355 break;
4356 case IPPROTO_UDP:
4357 mss_l4len_idx = sizeof(struct udphdr) <<
4358 E1000_ADVTXD_L4LEN_SHIFT;
4359 break;
4360 default:
4361 if (unlikely(net_ratelimit())) {
4362 dev_warn(tx_ring->dev,
4363 "partial checksum but l4 proto=%x!\n",
4364 l4_hdr);
4365 }
4366 break;
4367 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004368
4369 /* update TX checksum flag */
4370 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004371 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004372
4373 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004374 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004375
4376 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004377}
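
/* Illustration (assuming option-less headers): for a UDP/IPv4 packet
 * submitted with CHECKSUM_PARTIAL, the switch above folds the 20-byte
 * IP header length and the network-header offset into vlan_macip_lens,
 * sets E1000_ADVTXD_TUCMD_IPV4 in type_tucmd, and places
 * sizeof(struct udphdr) = 8 in the L4 length field of mss_l4len_idx
 * before the context descriptor is written.
 */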
4378
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004379#define IGB_SET_FLAG(_input, _flag, _result) \
4380 ((_flag <= _result) ? \
4381 ((u32)(_input & _flag) * (_result / _flag)) : \
4382 ((u32)(_input & _flag) / (_flag / _result)))
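
/* Illustration: in the uses below, _flag and _result are single-bit
 * masks, and IGB_SET_FLAG() simply moves the bit between the two
 * positions. If, say, _flag were 0x0001 and _result 0x0040, a set flag
 * is multiplied by 0x0040 / 0x0001 and lands on bit 6 of the returned
 * value; when _result is the smaller of the two, the division branch
 * shifts the bit down instead.
 */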
4383
4384static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
Alexander Duycke032afc2011-08-26 07:44:48 +00004385{
4386 /* set type for advanced descriptor with frame checksum insertion */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004387 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
4388 E1000_ADVTXD_DCMD_DEXT |
4389 E1000_ADVTXD_DCMD_IFCS;
Alexander Duycke032afc2011-08-26 07:44:48 +00004390
4391 /* set HW vlan bit if vlan is present */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004392 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
4393 (E1000_ADVTXD_DCMD_VLE));
Alexander Duycke032afc2011-08-26 07:44:48 +00004394
4395 /* set segmentation bits for TSO */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004396 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
4397 (E1000_ADVTXD_DCMD_TSE));
4398
4399 /* set timestamp bit if present */
4400 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
4401 (E1000_ADVTXD_MAC_TSTAMP));
4402
4403 /* insert frame checksum */
4404 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
Alexander Duycke032afc2011-08-26 07:44:48 +00004405
4406 return cmd_type;
4407}
4408
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004409static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4410 union e1000_adv_tx_desc *tx_desc,
4411 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004412{
4413 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4414
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004415 /* 82575 requires a unique index per ring */
4416 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004417 olinfo_status |= tx_ring->reg_idx << 4;
4418
4419 /* insert L4 checksum */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004420 olinfo_status |= IGB_SET_FLAG(tx_flags,
4421 IGB_TX_FLAGS_CSUM,
4422 (E1000_TXD_POPTS_TXSM << 8));
Alexander Duycke032afc2011-08-26 07:44:48 +00004423
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004424 /* insert IPv4 checksum */
4425 olinfo_status |= IGB_SET_FLAG(tx_flags,
4426 IGB_TX_FLAGS_IPV4,
4427 (E1000_TXD_POPTS_IXSM << 8));
Alexander Duycke032afc2011-08-26 07:44:48 +00004428
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004429 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004430}
4431
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004432static void igb_tx_map(struct igb_ring *tx_ring,
4433 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004434 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004435{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004436 struct sk_buff *skb = first->skb;
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00004437 struct igb_tx_buffer *tx_buffer;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004438 union e1000_adv_tx_desc *tx_desc;
Alexander Duyck80d07592012-11-13 04:03:24 +00004439 struct skb_frag_struct *frag;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004440 dma_addr_t dma;
Alexander Duyck80d07592012-11-13 04:03:24 +00004441 unsigned int data_len, size;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004442 u32 tx_flags = first->tx_flags;
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004443 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004444 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004445
4446 tx_desc = IGB_TX_DESC(tx_ring, i);
4447
Alexander Duyck80d07592012-11-13 04:03:24 +00004448 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
4449
4450 size = skb_headlen(skb);
4451 data_len = skb->data_len;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004452
4453 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08004454
Alexander Duyck80d07592012-11-13 04:03:24 +00004455 tx_buffer = first;
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004456
Alexander Duyck80d07592012-11-13 04:03:24 +00004457 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
4458 if (dma_mapping_error(tx_ring->dev, dma))
4459 goto dma_error;
4460
4461 /* record length, and DMA address */
4462 dma_unmap_len_set(tx_buffer, len, size);
4463 dma_unmap_addr_set(tx_buffer, dma, dma);
4464
4465 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4466
Alexander Duyckebe42d12011-08-26 07:45:09 +00004467 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4468 tx_desc->read.cmd_type_len =
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004469 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004470
Alexander Duyckebe42d12011-08-26 07:45:09 +00004471 i++;
4472 tx_desc++;
4473 if (i == tx_ring->count) {
4474 tx_desc = IGB_TX_DESC(tx_ring, 0);
4475 i = 0;
4476 }
Alexander Duyck80d07592012-11-13 04:03:24 +00004477 tx_desc->read.olinfo_status = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004478
4479 dma += IGB_MAX_DATA_PER_TXD;
4480 size -= IGB_MAX_DATA_PER_TXD;
4481
Alexander Duyckebe42d12011-08-26 07:45:09 +00004482 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4483 }
4484
4485 if (likely(!data_len))
4486 break;
4487
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004488 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004489
Alexander Duyck65689fe2009-03-20 00:17:43 +00004490 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004491 tx_desc++;
4492 if (i == tx_ring->count) {
4493 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004494 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004495 }
Alexander Duyck80d07592012-11-13 04:03:24 +00004496 tx_desc->read.olinfo_status = 0;
Alexander Duyck65689fe2009-03-20 00:17:43 +00004497
Eric Dumazet9e903e02011-10-18 21:00:24 +00004498 size = skb_frag_size(frag);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004499 data_len -= size;
4500
4501 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
Alexander Duyck80d07592012-11-13 04:03:24 +00004502 size, DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00004503
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00004504 tx_buffer = &tx_ring->tx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08004505 }
4506
Alexander Duyckebe42d12011-08-26 07:45:09 +00004507 /* write last descriptor with RS and EOP bits */
Alexander Duyck1d9daf42012-11-13 04:03:23 +00004508 cmd_type |= size | IGB_TXD_DCMD;
4509 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
Alexander Duyck8542db02011-08-26 07:44:43 +00004510
Alexander Duyck80d07592012-11-13 04:03:24 +00004511 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4512
Alexander Duyck8542db02011-08-26 07:44:43 +00004513 /* set the timestamp */
4514 first->time_stamp = jiffies;
4515
Alexander Duyckebe42d12011-08-26 07:45:09 +00004516 /*
4517 * Force memory writes to complete before letting h/w know there
4518 * are new descriptors to fetch. (Only applicable for weak-ordered
4519 * memory model archs, such as IA-64).
4520 *
4521 * We also need this memory barrier to make certain all of the
4522 * status bits have been updated before next_to_watch is written.
4523 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004524 wmb();
4525
Alexander Duyckebe42d12011-08-26 07:45:09 +00004526 /* set next_to_watch value indicating a packet is present */
4527 first->next_to_watch = tx_desc;
4528
4529 i++;
4530 if (i == tx_ring->count)
4531 i = 0;
4532
Auke Kok9d5c8242008-01-24 02:22:38 -08004533 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004534
Alexander Duyckfce99e32009-10-27 15:51:27 +00004535 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004536
Auke Kok9d5c8242008-01-24 02:22:38 -08004537 /* we need this if more than one processor can write to our tail
4538	 * at a time, it synchronizes IO on IA64/Altix systems */
4539 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004540
4541 return;
4542
4543dma_error:
4544 dev_err(tx_ring->dev, "TX DMA map failed\n");
4545
4546 /* clear dma mappings for failed tx_buffer_info map */
4547 for (;;) {
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00004548 tx_buffer = &tx_ring->tx_buffer_info[i];
4549 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
4550 if (tx_buffer == first)
Alexander Duyckebe42d12011-08-26 07:45:09 +00004551 break;
4552 if (i == 0)
4553 i = tx_ring->count;
4554 i--;
4555 }
4556
4557 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004558}
4559
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004560static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004561{
Alexander Duycke694e962009-10-27 15:53:06 +00004562 struct net_device *netdev = tx_ring->netdev;
4563
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004564 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004565
Auke Kok9d5c8242008-01-24 02:22:38 -08004566 /* Herbert's original patch had:
4567 * smp_mb__after_netif_stop_queue();
4568 * but since that doesn't exist yet, just open code it. */
4569 smp_mb();
4570
4571	/* We need to check again in case another CPU has just
4572 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004573 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004574 return -EBUSY;
4575
4576 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004577 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004578
4579 u64_stats_update_begin(&tx_ring->tx_syncp2);
4580 tx_ring->tx_stats.restart_queue2++;
4581 u64_stats_update_end(&tx_ring->tx_syncp2);
4582
Auke Kok9d5c8242008-01-24 02:22:38 -08004583 return 0;
4584}
4585
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004586static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004587{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004588 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004589 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004590 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004591}
4592
Alexander Duyckcd392f52011-08-26 07:43:59 +00004593netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4594 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004595{
Alexander Duyck8542db02011-08-26 07:44:43 +00004596 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004597 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004598 u32 tx_flags = 0;
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004599 u16 count = TXD_USE_COUNT(skb_headlen(skb));
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004600 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004601 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004602
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004603 /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
4604 * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
Auke Kok9d5c8242008-01-24 02:22:38 -08004605 * + 2 desc gap to keep tail from touching head,
Auke Kok9d5c8242008-01-24 02:22:38 -08004606 * + 1 desc for context descriptor,
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004607 * otherwise try next time
4608 */
4609 if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
4610 unsigned short f;
4611 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4612 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4613 } else {
4614 count += skb_shinfo(skb)->nr_frags;
4615 }
4616
4617 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004618 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004619 return NETDEV_TX_BUSY;
4620 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004621
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004622 /* record the location of the first descriptor for this packet */
4623 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4624 first->skb = skb;
4625 first->bytecount = skb->len;
4626 first->gso_segs = 1;
4627
Matthew Vickb66e2392012-12-13 07:20:33 +00004628 skb_tx_timestamp(skb);
4629
Alexander Duyckb646c222013-02-07 08:55:46 +00004630 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4631 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
Matthew Vick1f6e8172012-08-18 07:26:33 +00004632
Alexander Duyckb646c222013-02-07 08:55:46 +00004633 if (!(adapter->ptp_tx_skb)) {
4634 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4635 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4636
4637 adapter->ptp_tx_skb = skb_get(skb);
4638 adapter->ptp_tx_start = jiffies;
4639 if (adapter->hw.mac.type == e1000_82576)
4640 schedule_work(&adapter->ptp_tx_work);
4641 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004642 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004643
Jesse Grosseab6d182010-10-20 13:56:03 +00004644 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004645 tx_flags |= IGB_TX_FLAGS_VLAN;
4646 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4647 }
4648
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004649 /* record initial flags and protocol */
4650 first->tx_flags = tx_flags;
4651 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004652
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004653 tso = igb_tso(tx_ring, first, &hdr_len);
4654 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004655 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004656 else if (!tso)
4657 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004658
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004659 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004660
4661 /* Make sure there is space in the ring for the next send. */
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00004662 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004663
Auke Kok9d5c8242008-01-24 02:22:38 -08004664 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004665
4666out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004667 igb_unmap_and_free_tx_resource(tx_ring, first);
4668
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004669 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004670}
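
/* Worked example with a hypothetical skb: if the linear area fits in a
 * single data descriptor and the skb carries two page fragments, each
 * below IGB_MAX_DATA_PER_TXD, count works out to 1 + 2 = 3 and
 * igb_maybe_stop_tx() is asked for count + 3 = 6 free descriptors; the
 * extra three cover the context descriptor plus the two-descriptor gap
 * that keeps tail from touching head.
 */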
4671
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004672static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4673 struct sk_buff *skb)
4674{
4675 unsigned int r_idx = skb->queue_mapping;
4676
4677 if (r_idx >= adapter->num_tx_queues)
4678 r_idx = r_idx % adapter->num_tx_queues;
4679
4680 return adapter->tx_ring[r_idx];
4681}
4682
Alexander Duyckcd392f52011-08-26 07:43:59 +00004683static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4684 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004685{
4686 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004687
4688 if (test_bit(__IGB_DOWN, &adapter->state)) {
4689 dev_kfree_skb_any(skb);
4690 return NETDEV_TX_OK;
4691 }
4692
4693 if (skb->len <= 0) {
4694 dev_kfree_skb_any(skb);
4695 return NETDEV_TX_OK;
4696 }
4697
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004698 /*
4699 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4700 * in order to meet this minimum size requirement.
4701 */
Tushar Daveea5ceea2012-09-14 03:43:43 +00004702 if (unlikely(skb->len < 17)) {
4703 if (skb_pad(skb, 17 - skb->len))
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004704 return NETDEV_TX_OK;
4705 skb->len = 17;
Tushar Daveea5ceea2012-09-14 03:43:43 +00004706 skb_set_tail_pointer(skb, 17);
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004707 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004708
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004709 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004710}
4711
4712/**
4713 * igb_tx_timeout - Respond to a Tx Hang
4714 * @netdev: network interface device structure
4715 **/
4716static void igb_tx_timeout(struct net_device *netdev)
4717{
4718 struct igb_adapter *adapter = netdev_priv(netdev);
4719 struct e1000_hw *hw = &adapter->hw;
4720
4721 /* Do the reset outside of interrupt context */
4722 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004723
Alexander Duyck06218a82011-08-26 07:46:55 +00004724 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00004725 hw->dev_spec._82575.global_device_reset = true;
4726
Auke Kok9d5c8242008-01-24 02:22:38 -08004727 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004728 wr32(E1000_EICS,
4729 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004730}
4731
4732static void igb_reset_task(struct work_struct *work)
4733{
4734 struct igb_adapter *adapter;
4735 adapter = container_of(work, struct igb_adapter, reset_task);
4736
Taku Izumic97ec422010-04-27 14:39:30 +00004737 igb_dump(adapter);
4738 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004739 igb_reinit_locked(adapter);
4740}
4741
4742/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004743 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004744 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004745 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004746 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004747 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004748static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4749 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004750{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004751 struct igb_adapter *adapter = netdev_priv(netdev);
4752
4753 spin_lock(&adapter->stats64_lock);
4754 igb_update_stats(adapter, &adapter->stats64);
4755 memcpy(stats, &adapter->stats64, sizeof(*stats));
4756 spin_unlock(&adapter->stats64_lock);
4757
4758 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004759}
4760
4761/**
4762 * igb_change_mtu - Change the Maximum Transfer Unit
4763 * @netdev: network interface device structure
4764 * @new_mtu: new value for maximum frame size
4765 *
4766 * Returns 0 on success, negative on failure
4767 **/
4768static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4769{
4770 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004771 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004772 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004773
Alexander Duyckc809d222009-10-27 23:52:13 +00004774 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004775 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004776 return -EINVAL;
4777 }
4778
Alexander Duyck153285f2011-08-26 07:43:32 +00004779#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004780 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004781 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004782 return -EINVAL;
4783 }
4784
4785 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4786 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004787
Auke Kok9d5c8242008-01-24 02:22:38 -08004788 /* igb_down has a dependency on max_frame_size */
4789 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004790
Alexander Duyck4c844852009-10-27 15:52:07 +00004791 if (netif_running(netdev))
4792 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004793
Alexander Duyck090b1792009-10-27 23:51:55 +00004794 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004795 netdev->mtu, new_mtu);
4796 netdev->mtu = new_mtu;
4797
4798 if (netif_running(netdev))
4799 igb_up(adapter);
4800 else
4801 igb_reset(adapter);
4802
4803 clear_bit(__IGB_RESETTING, &adapter->state);
4804
4805 return 0;
4806}
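
/* Worked example: a requested MTU of 1500 gives
 * max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
 * = 1522, well below MAX_JUMBO_FRAME_SIZE. The 9216 figure in the error
 * message is the same limit expressed as an MTU, since
 * 9216 + 22 = 9238 = MAX_STD_JUMBO_FRAME_SIZE.
 */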
4807
4808/**
4809 * igb_update_stats - Update the board statistics counters
4810 * @adapter: board private structure
4811 **/
4812
Eric Dumazet12dcd862010-10-15 17:27:10 +00004813void igb_update_stats(struct igb_adapter *adapter,
4814 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004815{
4816 struct e1000_hw *hw = &adapter->hw;
4817 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004818 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004819 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004820 int i;
4821 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004822 unsigned int start;
4823 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004824
4825#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4826
4827 /*
4828 * Prevent stats update while adapter is being reset, or if the pci
4829 * connection is down.
4830 */
4831 if (adapter->link_speed == 0)
4832 return;
4833 if (pci_channel_offline(pdev))
4834 return;
4835
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004836 bytes = 0;
4837 packets = 0;
4838 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyckae1c07a2012-08-08 05:23:22 +00004839 u32 rqdpc = rd32(E1000_RQDPC(i));
Alexander Duyck3025a442010-02-17 01:02:39 +00004840 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004841
Alexander Duyckae1c07a2012-08-08 05:23:22 +00004842 if (rqdpc) {
4843 ring->rx_stats.drops += rqdpc;
4844 net_stats->rx_fifo_errors += rqdpc;
4845 }
Eric Dumazet12dcd862010-10-15 17:27:10 +00004846
4847 do {
4848 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4849 _bytes = ring->rx_stats.bytes;
4850 _packets = ring->rx_stats.packets;
4851 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4852 bytes += _bytes;
4853 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004854 }
4855
Alexander Duyck128e45e2009-11-12 18:37:38 +00004856 net_stats->rx_bytes = bytes;
4857 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004858
4859 bytes = 0;
4860 packets = 0;
4861 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004862 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004863 do {
4864 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4865 _bytes = ring->tx_stats.bytes;
4866 _packets = ring->tx_stats.packets;
4867 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4868 bytes += _bytes;
4869 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004870 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004871 net_stats->tx_bytes = bytes;
4872 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004873
4874 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004875 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4876 adapter->stats.gprc += rd32(E1000_GPRC);
4877 adapter->stats.gorc += rd32(E1000_GORCL);
4878 rd32(E1000_GORCH); /* clear GORCL */
4879 adapter->stats.bprc += rd32(E1000_BPRC);
4880 adapter->stats.mprc += rd32(E1000_MPRC);
4881 adapter->stats.roc += rd32(E1000_ROC);
4882
4883 adapter->stats.prc64 += rd32(E1000_PRC64);
4884 adapter->stats.prc127 += rd32(E1000_PRC127);
4885 adapter->stats.prc255 += rd32(E1000_PRC255);
4886 adapter->stats.prc511 += rd32(E1000_PRC511);
4887 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4888 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4889 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4890 adapter->stats.sec += rd32(E1000_SEC);
4891
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004892 mpc = rd32(E1000_MPC);
4893 adapter->stats.mpc += mpc;
4894 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004895 adapter->stats.scc += rd32(E1000_SCC);
4896 adapter->stats.ecol += rd32(E1000_ECOL);
4897 adapter->stats.mcc += rd32(E1000_MCC);
4898 adapter->stats.latecol += rd32(E1000_LATECOL);
4899 adapter->stats.dc += rd32(E1000_DC);
4900 adapter->stats.rlec += rd32(E1000_RLEC);
4901 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4902 adapter->stats.xontxc += rd32(E1000_XONTXC);
4903 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4904 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4905 adapter->stats.fcruc += rd32(E1000_FCRUC);
4906 adapter->stats.gptc += rd32(E1000_GPTC);
4907 adapter->stats.gotc += rd32(E1000_GOTCL);
4908 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004909 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004910 adapter->stats.ruc += rd32(E1000_RUC);
4911 adapter->stats.rfc += rd32(E1000_RFC);
4912 adapter->stats.rjc += rd32(E1000_RJC);
4913 adapter->stats.tor += rd32(E1000_TORH);
4914 adapter->stats.tot += rd32(E1000_TOTH);
4915 adapter->stats.tpr += rd32(E1000_TPR);
4916
4917 adapter->stats.ptc64 += rd32(E1000_PTC64);
4918 adapter->stats.ptc127 += rd32(E1000_PTC127);
4919 adapter->stats.ptc255 += rd32(E1000_PTC255);
4920 adapter->stats.ptc511 += rd32(E1000_PTC511);
4921 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4922 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4923
4924 adapter->stats.mptc += rd32(E1000_MPTC);
4925 adapter->stats.bptc += rd32(E1000_BPTC);
4926
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004927 adapter->stats.tpt += rd32(E1000_TPT);
4928 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004929
4930 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004931 /* read internal phy specific stats */
4932 reg = rd32(E1000_CTRL_EXT);
4933 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4934 adapter->stats.rxerrc += rd32(E1000_RXERRC);
Carolyn Wyborny3dbdf962012-09-12 04:36:24 +00004935
4936 /* this stat has invalid values on i210/i211 */
4937 if ((hw->mac.type != e1000_i210) &&
4938 (hw->mac.type != e1000_i211))
4939 adapter->stats.tncrs += rd32(E1000_TNCRS);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004940 }
4941
Auke Kok9d5c8242008-01-24 02:22:38 -08004942 adapter->stats.tsctc += rd32(E1000_TSCTC);
4943 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4944
4945 adapter->stats.iac += rd32(E1000_IAC);
4946 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4947 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4948 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4949 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4950 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4951 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4952 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4953 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4954
4955 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004956 net_stats->multicast = adapter->stats.mprc;
4957 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004958
4959 /* Rx Errors */
4960
4961 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004962 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004963 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004964 adapter->stats.crcerrs + adapter->stats.algnerrc +
4965 adapter->stats.ruc + adapter->stats.roc +
4966 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004967 net_stats->rx_length_errors = adapter->stats.ruc +
4968 adapter->stats.roc;
4969 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4970 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4971 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004972
4973 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004974 net_stats->tx_errors = adapter->stats.ecol +
4975 adapter->stats.latecol;
4976 net_stats->tx_aborted_errors = adapter->stats.ecol;
4977 net_stats->tx_window_errors = adapter->stats.latecol;
4978 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004979
4980 /* Tx Dropped needs to be maintained elsewhere */
4981
4982 /* Phy Stats */
4983 if (hw->phy.media_type == e1000_media_type_copper) {
4984 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004985 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004986 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4987 adapter->phy_stats.idle_errors += phy_tmp;
4988 }
4989 }
4990
4991 /* Management Stats */
4992 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4993 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4994 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004995
4996 /* OS2BMC Stats */
4997 reg = rd32(E1000_MANC);
4998 if (reg & E1000_MANC_EN_BMC2OS) {
4999 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
5000 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
5001 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
5002 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
5003 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005004}
5005
Auke Kok9d5c8242008-01-24 02:22:38 -08005006static irqreturn_t igb_msix_other(int irq, void *data)
5007{
Alexander Duyck047e0032009-10-27 15:49:27 +00005008 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08005009 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07005010 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07005011 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00005012
Alexander Duyck7f081d42010-01-07 17:41:00 +00005013 if (icr & E1000_ICR_DRSTA)
5014 schedule_work(&adapter->reset_task);
5015
Alexander Duyck047e0032009-10-27 15:49:27 +00005016 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005017 /* HW is reporting DMA is out of sync */
5018 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00005019		/* The DMA Out of Sync is also an indication of a spoof event
5020 * in IOV mode. Check the Wrong VM Behavior register to
5021 * see if it is really a spoof event. */
5022 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00005023 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00005024
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005025 /* Check for a mailbox event */
5026 if (icr & E1000_ICR_VMMB)
5027 igb_msg_task(adapter);
5028
5029 if (icr & E1000_ICR_LSC) {
5030 hw->mac.get_link_status = 1;
5031 /* guard against interrupt when we're going down */
5032 if (!test_bit(__IGB_DOWN, &adapter->state))
5033 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5034 }
5035
Matthew Vick1f6e8172012-08-18 07:26:33 +00005036 if (icr & E1000_ICR_TS) {
5037 u32 tsicr = rd32(E1000_TSICR);
5038
5039 if (tsicr & E1000_TSICR_TXTS) {
5040 /* acknowledge the interrupt */
5041 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5042 /* retrieve hardware timestamp */
5043 schedule_work(&adapter->ptp_tx_work);
5044 }
5045 }
Matthew Vick1f6e8172012-08-18 07:26:33 +00005046
PJ Waskiewicz844290e2008-06-27 11:00:39 -07005047 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08005048
5049 return IRQ_HANDLED;
5050}
5051
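/* igb_write_itr - program the pending interrupt throttle rate for a vector.
 * 82575 expects the interval replicated in both halves of the EITR word,
 * while later MACs take the value plus the counter-ignore bit; the write is
 * skipped entirely when no new ITR value is pending.
 */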
Alexander Duyck047e0032009-10-27 15:49:27 +00005052static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005053{
Alexander Duyck26b39272010-02-17 01:00:41 +00005054 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00005055 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08005056
Alexander Duyck047e0032009-10-27 15:49:27 +00005057 if (!q_vector->set_itr)
5058 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00005059
Alexander Duyck047e0032009-10-27 15:49:27 +00005060 if (!itr_val)
5061 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005062
Alexander Duyck26b39272010-02-17 01:00:41 +00005063 if (adapter->hw.mac.type == e1000_82575)
5064 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005065 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00005066 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00005067
5068 writel(itr_val, q_vector->itr_register);
5069 q_vector->set_itr = 0;
5070}
5071
5072static irqreturn_t igb_msix_ring(int irq, void *data)
5073{
5074 struct igb_q_vector *q_vector = data;
5075
5076 /* Write the ITR value calculated from the previous interrupt. */
5077 igb_write_itr(q_vector);
5078
5079 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005080
Auke Kok9d5c8242008-01-24 02:22:38 -08005081 return IRQ_HANDLED;
5082}
5083
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005084#ifdef CONFIG_IGB_DCA
Alexander Duyck6a050042012-09-25 00:31:27 +00005085static void igb_update_tx_dca(struct igb_adapter *adapter,
5086 struct igb_ring *tx_ring,
5087 int cpu)
5088{
5089 struct e1000_hw *hw = &adapter->hw;
5090 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
5091
5092 if (hw->mac.type != e1000_82575)
5093 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5094
5095 /*
5096 * We can enable relaxed ordering for reads, but not writes when
5097 * DCA is enabled. This is due to a known issue in some chipsets
5098 * which will cause the DCA tag to be cleared.
5099 */
5100 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
5101 E1000_DCA_TXCTRL_DATA_RRO_EN |
5102 E1000_DCA_TXCTRL_DESC_DCA_EN;
5103
5104 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
5105}
5106
5107static void igb_update_rx_dca(struct igb_adapter *adapter,
5108 struct igb_ring *rx_ring,
5109 int cpu)
5110{
5111 struct e1000_hw *hw = &adapter->hw;
5112 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
5113
5114 if (hw->mac.type != e1000_82575)
5115 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5116
5117 /*
5118 * We can enable relaxed ordering for reads, but not writes when
5119 * DCA is enabled. This is due to a known issue in some chipsets
5120 * which will cause the DCA tag to be cleared.
5121 */
5122 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
5123 E1000_DCA_RXCTRL_DESC_DCA_EN;
5124
5125 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
5126}
5127
Alexander Duyck047e0032009-10-27 15:49:27 +00005128static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005129{
Alexander Duyck047e0032009-10-27 15:49:27 +00005130 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005131 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005132
Alexander Duyck047e0032009-10-27 15:49:27 +00005133 if (q_vector->cpu == cpu)
5134 goto out_no_update;
5135
Alexander Duyck6a050042012-09-25 00:31:27 +00005136 if (q_vector->tx.ring)
5137 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
5138
5139 if (q_vector->rx.ring)
5140 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
5141
Alexander Duyck047e0032009-10-27 15:49:27 +00005142 q_vector->cpu = cpu;
5143out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005144 put_cpu();
5145}
5146
5147static void igb_setup_dca(struct igb_adapter *adapter)
5148{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00005149 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005150 int i;
5151
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005152 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005153 return;
5154
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00005155 /* Always use CB2 mode, difference is masked in the CB driver. */
5156 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
5157
Alexander Duyck047e0032009-10-27 15:49:27 +00005158 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00005159 adapter->q_vector[i]->cpu = -1;
5160 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005161 }
5162}
5163
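/* __igb_notify_dca - per-device callback invoked for DCA provider events.
 * On DCA_PROVIDER_ADD the port is registered as a DCA requester and the
 * rings are retagged via igb_setup_dca(); on DCA_PROVIDER_REMOVE the
 * requester is dropped and DCA mode is disabled in hardware.
 */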
5164static int __igb_notify_dca(struct device *dev, void *data)
5165{
5166 struct net_device *netdev = dev_get_drvdata(dev);
5167 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00005168 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005169 struct e1000_hw *hw = &adapter->hw;
5170 unsigned long event = *(unsigned long *)data;
5171
5172 switch (event) {
5173 case DCA_PROVIDER_ADD:
5174 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005175 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005176 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005177 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08005178 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00005179 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005180 igb_setup_dca(adapter);
5181 break;
5182 }
5183 /* Fall Through since DCA is disabled. */
5184 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005185 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005186 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00005187 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005188 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00005189 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07005190 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08005191 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005192 }
5193 break;
5194 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08005195
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005196 return 0;
5197}
5198
5199static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5200 void *p)
5201{
5202 int ret_val;
5203
5204 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5205 __igb_notify_dca);
5206
5207 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5208}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005209#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08005210
Greg Rose0224d662011-10-14 02:57:14 +00005211#ifdef CONFIG_PCI_IOV
5212static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5213{
5214 unsigned char mac_addr[ETH_ALEN];
Greg Rose0224d662011-10-14 02:57:14 +00005215
Mitch A Williams5ac6f912013-01-18 08:57:20 +00005216 eth_zero_addr(mac_addr);
Greg Rose0224d662011-10-14 02:57:14 +00005217 igb_set_vf_mac(adapter, vf, mac_addr);
5218
Stefan Assmannf5571472012-08-18 04:06:11 +00005219 return 0;
Greg Rose0224d662011-10-14 02:57:14 +00005220}
5221
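/* igb_vfs_are_assigned - check whether any VF owned by this PF is currently
 * assigned to a guest (PCI_DEV_FLAGS_ASSIGNED). Only the 82576 and i350 VF
 * device IDs are scanned, as those are the SR-IOV capable parts handled here.
 */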
Stefan Assmannf5571472012-08-18 04:06:11 +00005222static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
Greg Rose0224d662011-10-14 02:57:14 +00005223{
Greg Rose0224d662011-10-14 02:57:14 +00005224 struct pci_dev *pdev = adapter->pdev;
Stefan Assmannf5571472012-08-18 04:06:11 +00005225 struct pci_dev *vfdev;
5226 int dev_id;
Greg Rose0224d662011-10-14 02:57:14 +00005227
5228 switch (adapter->hw.mac.type) {
5229 case e1000_82576:
Stefan Assmannf5571472012-08-18 04:06:11 +00005230 dev_id = IGB_82576_VF_DEV_ID;
Greg Rose0224d662011-10-14 02:57:14 +00005231 break;
5232 case e1000_i350:
Stefan Assmannf5571472012-08-18 04:06:11 +00005233 dev_id = IGB_I350_VF_DEV_ID;
Greg Rose0224d662011-10-14 02:57:14 +00005234 break;
5235 default:
Stefan Assmannf5571472012-08-18 04:06:11 +00005236 return false;
Greg Rose0224d662011-10-14 02:57:14 +00005237 }
5238
Stefan Assmannf5571472012-08-18 04:06:11 +00005239 /* loop through all the VFs to see if we own any that are assigned */
5240 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
5241 while (vfdev) {
5242 /* if we don't own it we don't care */
5243 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
5244 /* if it is assigned we cannot release it */
5245 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
Greg Rose0224d662011-10-14 02:57:14 +00005246 return true;
5247 }
Stefan Assmannf5571472012-08-18 04:06:11 +00005248
5249 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
Greg Rose0224d662011-10-14 02:57:14 +00005250 }
Stefan Assmannf5571472012-08-18 04:06:11 +00005251
Greg Rose0224d662011-10-14 02:57:14 +00005252 return false;
5253}
5254
5255#endif
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005256static void igb_ping_all_vfs(struct igb_adapter *adapter)
5257{
5258 struct e1000_hw *hw = &adapter->hw;
5259 u32 ping;
5260 int i;
5261
5262 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5263 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005264 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005265 ping |= E1000_VT_MSGTYPE_CTS;
5266 igb_write_mbx(hw, &ping, 1, i);
5267 }
5268}
5269
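/* igb_set_vf_promisc - apply a VF's promiscuous-mode request to its VMOLR.
 * Multicast promiscuous (MPME) is honoured directly; when it is cleared,
 * the VF's stored multicast hashes are written back to the MTA (or MPME is
 * kept if more than 30 hashes are in use). Unsupported flag bits fail the
 * request with -EINVAL.
 */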
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005270static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5271{
5272 struct e1000_hw *hw = &adapter->hw;
5273 u32 vmolr = rd32(E1000_VMOLR(vf));
5274 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5275
Alexander Duyckd85b90042010-09-22 17:56:20 +00005276 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005277 IGB_VF_FLAG_MULTI_PROMISC);
5278 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5279
5280 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5281 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00005282 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005283 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5284 } else {
5285 /*
5286 * if we have hashes and we are clearing a multicast promisc
5287 * flag we need to write the hashes to the MTA as this step
5288 * was previously skipped
5289 */
5290 if (vf_data->num_vf_mc_hashes > 30) {
5291 vmolr |= E1000_VMOLR_MPME;
5292 } else if (vf_data->num_vf_mc_hashes) {
5293 int j;
5294 vmolr |= E1000_VMOLR_ROMPE;
5295 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5296 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5297 }
5298 }
5299
5300 wr32(E1000_VMOLR(vf), vmolr);
5301
5302 /* there are flags left unprocessed, likely not supported */
5303 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5304 return -EINVAL;
5305
5306 return 0;
5307
5308}
5309
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005310static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5311 u32 *msgbuf, u32 vf)
5312{
5313 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5314 u16 *hash_list = (u16 *)&msgbuf[1];
5315 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5316 int i;
5317
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005318 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005319 * to this VF for later use to restore when the PF multicast
5320 * list changes
5321 */
5322 vf_data->num_vf_mc_hashes = n;
5323
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005324 /* only up to 30 hash values supported */
5325 if (n > 30)
5326 n = 30;
5327
5328 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005329 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005330 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005331
5332 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005333 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005334
5335 return 0;
5336}
5337
5338static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5339{
5340 struct e1000_hw *hw = &adapter->hw;
5341 struct vf_data_storage *vf_data;
5342 int i, j;
5343
5344 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005345 u32 vmolr = rd32(E1000_VMOLR(i));
5346 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5347
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005348 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005349
5350 if ((vf_data->num_vf_mc_hashes > 30) ||
5351 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5352 vmolr |= E1000_VMOLR_MPME;
5353 } else if (vf_data->num_vf_mc_hashes) {
5354 vmolr |= E1000_VMOLR_ROMPE;
5355 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5356 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5357 }
5358 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005359 }
5360}
5361
5362static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5363{
5364 struct e1000_hw *hw = &adapter->hw;
5365 u32 pool_mask, reg, vid;
5366 int i;
5367
5368 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5369
5370 /* Find the vlan filter for this id */
5371 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5372 reg = rd32(E1000_VLVF(i));
5373
5374 /* remove the vf from the pool */
5375 reg &= ~pool_mask;
5376
5377 /* if pool is empty then remove entry from vfta */
5378 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5379 (reg & E1000_VLVF_VLANID_ENABLE)) {
 5380 vid = reg & E1000_VLVF_VLANID_MASK;
 5381 reg = 0;
5382 igb_vfta_set(hw, vid, false);
5383 }
5384
5385 wr32(E1000_VLVF(i), reg);
5386 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005387
5388 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005389}
5390
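/* igb_vlvf_set - add or remove a VF pool from a shared VLVF VLAN filter.
 * Each VLVF entry maps one VLAN id to a pool bitmap; the matching VFTA bit
 * is set only while at least one pool references the VLAN, and the VF's
 * RLPML is grown or shrunk by 4 bytes to account for the VLAN tag.
 */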
5391static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5392{
5393 struct e1000_hw *hw = &adapter->hw;
5394 u32 reg, i;
5395
Alexander Duyck51466232009-10-27 23:47:35 +00005396 /* The vlvf table only exists on 82576 hardware and newer */
5397 if (hw->mac.type < e1000_82576)
5398 return -1;
5399
5400 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005401 if (!adapter->vfs_allocated_count)
5402 return -1;
5403
5404 /* Find the vlan filter for this id */
5405 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5406 reg = rd32(E1000_VLVF(i));
5407 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5408 vid == (reg & E1000_VLVF_VLANID_MASK))
5409 break;
5410 }
5411
5412 if (add) {
5413 if (i == E1000_VLVF_ARRAY_SIZE) {
5414 /* Did not find a matching VLAN ID entry that was
5415 * enabled. Search for a free filter entry, i.e.
5416 * one without the enable bit set
5417 */
5418 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5419 reg = rd32(E1000_VLVF(i));
5420 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5421 break;
5422 }
5423 }
5424 if (i < E1000_VLVF_ARRAY_SIZE) {
5425 /* Found an enabled/available entry */
5426 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5427
5428 /* if !enabled we need to set this up in vfta */
5429 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005430 /* add VID to filter table */
5431 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005432 reg |= E1000_VLVF_VLANID_ENABLE;
5433 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005434 reg &= ~E1000_VLVF_VLANID_MASK;
5435 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005436 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005437
5438 /* do not modify RLPML for PF devices */
5439 if (vf >= adapter->vfs_allocated_count)
5440 return 0;
5441
5442 if (!adapter->vf_data[vf].vlans_enabled) {
5443 u32 size;
5444 reg = rd32(E1000_VMOLR(vf));
5445 size = reg & E1000_VMOLR_RLPML_MASK;
5446 size += 4;
5447 reg &= ~E1000_VMOLR_RLPML_MASK;
5448 reg |= size;
5449 wr32(E1000_VMOLR(vf), reg);
5450 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005451
Alexander Duyck51466232009-10-27 23:47:35 +00005452 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005453 }
5454 } else {
5455 if (i < E1000_VLVF_ARRAY_SIZE) {
5456 /* remove vf from the pool */
5457 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5458 /* if pool is empty then remove entry from vfta */
5459 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5460 reg = 0;
5461 igb_vfta_set(hw, vid, false);
5462 }
5463 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005464
5465 /* do not modify RLPML for PF devices */
5466 if (vf >= adapter->vfs_allocated_count)
5467 return 0;
5468
5469 adapter->vf_data[vf].vlans_enabled--;
5470 if (!adapter->vf_data[vf].vlans_enabled) {
5471 u32 size;
5472 reg = rd32(E1000_VMOLR(vf));
5473 size = reg & E1000_VMOLR_RLPML_MASK;
5474 size -= 4;
5475 reg &= ~E1000_VMOLR_RLPML_MASK;
5476 reg |= size;
5477 wr32(E1000_VMOLR(vf), reg);
5478 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005479 }
5480 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005481 return 0;
5482}
5483
5484static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5485{
5486 struct e1000_hw *hw = &adapter->hw;
5487
5488 if (vid)
5489 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5490 else
5491 wr32(E1000_VMVIR(vf), 0);
5492}
5493
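/* igb_ndo_set_vf_vlan - ndo hook backing "ip link set <dev> vf <n> vlan ...".
 * A non-zero vlan/qos installs an administrative port VLAN for the VF
 * (VLVF entry plus default VLAN insertion via VMVIR); zero removes it and
 * restores normal VLAN handling for that VF.
 */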
5494static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5495 int vf, u16 vlan, u8 qos)
5496{
5497 int err = 0;
5498 struct igb_adapter *adapter = netdev_priv(netdev);
5499
5500 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5501 return -EINVAL;
5502 if (vlan || qos) {
5503 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5504 if (err)
5505 goto out;
5506 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5507 igb_set_vmolr(adapter, vf, !vlan);
5508 adapter->vf_data[vf].pf_vlan = vlan;
5509 adapter->vf_data[vf].pf_qos = qos;
5510 dev_info(&adapter->pdev->dev,
5511 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5512 if (test_bit(__IGB_DOWN, &adapter->state)) {
5513 dev_warn(&adapter->pdev->dev,
5514 "The VF VLAN has been set,"
5515 " but the PF device is not up.\n");
5516 dev_warn(&adapter->pdev->dev,
5517 "Bring the PF device up before"
5518 " attempting to use the VF device.\n");
5519 }
5520 } else {
5521 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5522 false, vf);
5523 igb_set_vmvir(adapter, vlan, vf);
5524 igb_set_vmolr(adapter, vf, true);
5525 adapter->vf_data[vf].pf_vlan = 0;
5526 adapter->vf_data[vf].pf_qos = 0;
5527 }
5528out:
5529 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005530}
5531
5532static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5533{
5534 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5535 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5536
5537 return igb_vlvf_set(adapter, vid, add, vf);
5538}
5539
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005540static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005541{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005542 /* clear flags - except flag that indicates PF has set the MAC */
5543 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005544 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005545
5546 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005547 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005548
5549 /* reset vlans for device */
5550 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005551 if (adapter->vf_data[vf].pf_vlan)
5552 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5553 adapter->vf_data[vf].pf_vlan,
5554 adapter->vf_data[vf].pf_qos);
5555 else
5556 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005557
5558 /* reset multicast table array for vf */
5559 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5560
5561 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005562 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005563}
5564
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005565static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5566{
5567 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5568
Mitch A Williams5ac6f912013-01-18 08:57:20 +00005569 /* clear mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005570 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
Mitch A Williams5ac6f912013-01-18 08:57:20 +00005571 eth_zero_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005572
5573 /* process remaining reset events */
5574 igb_vf_reset(adapter, vf);
5575}
5576
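/* igb_vf_reset_msg - complete a VF-initiated reset: clear the VF's state,
 * program its MAC address into a reserved RAR entry, re-enable its Tx/Rx
 * queues and reply over the mailbox with an ACK carrying the MAC address.
 */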
5577static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005578{
5579 struct e1000_hw *hw = &adapter->hw;
5580 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005581 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005582 u32 reg, msgbuf[3];
5583 u8 *addr = (u8 *)(&msgbuf[1]);
5584
5585 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005586 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005587
5588 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005589 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005590
5591 /* enable transmit and receive for vf */
5592 reg = rd32(E1000_VFTE);
5593 wr32(E1000_VFTE, reg | (1 << vf));
5594 reg = rd32(E1000_VFRE);
5595 wr32(E1000_VFRE, reg | (1 << vf));
5596
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005597 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005598
5599 /* reply to reset with ack and vf mac address */
5600 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5601 memcpy(addr, vf_mac, 6);
5602 igb_write_mbx(hw, msgbuf, 3, vf);
5603}
5604
5605static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5606{
Greg Rosede42edd2010-07-01 13:39:23 +00005607 /*
5608 * The VF MAC Address is stored in a packed array of bytes
5609 * starting at the second 32 bit word of the msg array
5610 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005611 unsigned char *addr = (char *)&msg[1];
5612 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005613
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005614 if (is_valid_ether_addr(addr))
5615 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005616
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005617 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005618}
5619
5620static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5621{
5622 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005623 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005624 u32 msg = E1000_VT_MSGTYPE_NACK;
5625
5626 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005627 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5628 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005629 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005630 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005631 }
5632}
5633
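/* igb_rcv_msg_from_vf - read and dispatch one mailbox message from a VF.
 * Until the VF has completed a reset (IGB_VF_FLAG_CTS) only E1000_VF_RESET
 * is honoured; every other request is NACKed, and administratively set MAC
 * or VLAN settings cannot be overridden by the VF.
 */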
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005634static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005635{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005636 struct pci_dev *pdev = adapter->pdev;
5637 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005638 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005639 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005640 s32 retval;
5641
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005642 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005643
Alexander Duyckfef45f42009-12-11 22:57:34 -08005644 if (retval) {
5645 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005646 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005647 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5648 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5649 return;
5650 goto out;
5651 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005652
5653 /* this is a message we already processed, do nothing */
5654 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005655 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005656
5657 /*
5658 * until the vf completes a reset it should not be
5659 * allowed to start any configuration.
5660 */
5661
5662 if (msgbuf[0] == E1000_VF_RESET) {
5663 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005664 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005665 }
5666
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005667 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005668 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5669 return;
5670 retval = -1;
5671 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005672 }
5673
5674 switch ((msgbuf[0] & 0xFFFF)) {
5675 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005676 retval = -EINVAL;
5677 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5678 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5679 else
5680 dev_warn(&pdev->dev,
5681 "VF %d attempted to override administratively "
5682 "set MAC address\nReload the VF driver to "
5683 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005684 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005685 case E1000_VF_SET_PROMISC:
5686 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5687 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005688 case E1000_VF_SET_MULTICAST:
5689 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5690 break;
5691 case E1000_VF_SET_LPE:
5692 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5693 break;
5694 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005695 retval = -1;
5696 if (vf_data->pf_vlan)
5697 dev_warn(&pdev->dev,
5698 "VF %d attempted to override administratively "
5699 "set VLAN tag\nReload the VF driver to "
5700 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005701 else
5702 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005703 break;
5704 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005705 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005706 retval = -1;
5707 break;
5708 }
5709
Alexander Duyckfef45f42009-12-11 22:57:34 -08005710 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5711out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005712 /* notify the VF of the results of what it sent us */
5713 if (retval)
5714 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5715 else
5716 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5717
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005718 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005719}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005720
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005721static void igb_msg_task(struct igb_adapter *adapter)
5722{
5723 struct e1000_hw *hw = &adapter->hw;
5724 u32 vf;
5725
5726 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5727 /* process any reset requests */
5728 if (!igb_check_for_rst(hw, vf))
5729 igb_vf_reset_event(adapter, vf);
5730
5731 /* process any messages pending */
5732 if (!igb_check_for_msg(hw, vf))
5733 igb_rcv_msg_from_vf(adapter, vf);
5734
5735 /* process any acks */
5736 if (!igb_check_for_ack(hw, vf))
5737 igb_rcv_ack_from_vf(adapter, vf);
5738 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005739}
5740
Auke Kok9d5c8242008-01-24 02:22:38 -08005741/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005742 * igb_set_uta - Set unicast filter table address
5743 * @adapter: board private structure
5744 *
5745 * The unicast table address is a register array of 32-bit registers.
 5746 * The table is meant to be used in a way similar to how the MTA is used;
 5747 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005748 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5749 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005750 **/
5751static void igb_set_uta(struct igb_adapter *adapter)
5752{
5753 struct e1000_hw *hw = &adapter->hw;
5754 int i;
5755
5756 /* The UTA table only exists on 82576 hardware and newer */
5757 if (hw->mac.type < e1000_82576)
5758 return;
5759
5760 /* we only need to do this if VMDq is enabled */
5761 if (!adapter->vfs_allocated_count)
5762 return;
5763
5764 for (i = 0; i < hw->mac.uta_reg_count; i++)
5765 array_wr32(E1000_UTA, i, ~0);
5766}
5767
5768/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005769 * igb_intr_msi - Interrupt Handler
5770 * @irq: interrupt number
5771 * @data: pointer to a network interface device structure
5772 **/
5773static irqreturn_t igb_intr_msi(int irq, void *data)
5774{
Alexander Duyck047e0032009-10-27 15:49:27 +00005775 struct igb_adapter *adapter = data;
5776 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005777 struct e1000_hw *hw = &adapter->hw;
5778 /* read ICR disables interrupts using IAM */
5779 u32 icr = rd32(E1000_ICR);
5780
Alexander Duyck047e0032009-10-27 15:49:27 +00005781 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005782
Alexander Duyck7f081d42010-01-07 17:41:00 +00005783 if (icr & E1000_ICR_DRSTA)
5784 schedule_work(&adapter->reset_task);
5785
Alexander Duyck047e0032009-10-27 15:49:27 +00005786 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005787 /* HW is reporting DMA is out of sync */
5788 adapter->stats.doosync++;
5789 }
5790
Auke Kok9d5c8242008-01-24 02:22:38 -08005791 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5792 hw->mac.get_link_status = 1;
5793 if (!test_bit(__IGB_DOWN, &adapter->state))
5794 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5795 }
5796
Matthew Vick1f6e8172012-08-18 07:26:33 +00005797 if (icr & E1000_ICR_TS) {
5798 u32 tsicr = rd32(E1000_TSICR);
5799
5800 if (tsicr & E1000_TSICR_TXTS) {
5801 /* acknowledge the interrupt */
5802 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5803 /* retrieve hardware timestamp */
5804 schedule_work(&adapter->ptp_tx_work);
5805 }
5806 }
Matthew Vick1f6e8172012-08-18 07:26:33 +00005807
Alexander Duyck047e0032009-10-27 15:49:27 +00005808 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005809
5810 return IRQ_HANDLED;
5811}
5812
5813/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005814 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005815 * @irq: interrupt number
5816 * @data: pointer to a network interface device structure
5817 **/
5818static irqreturn_t igb_intr(int irq, void *data)
5819{
Alexander Duyck047e0032009-10-27 15:49:27 +00005820 struct igb_adapter *adapter = data;
5821 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005822 struct e1000_hw *hw = &adapter->hw;
5823 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5824 * need for the IMC write */
5825 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005826
5827 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5828 * not set, then the adapter didn't send an interrupt */
5829 if (!(icr & E1000_ICR_INT_ASSERTED))
5830 return IRQ_NONE;
5831
Alexander Duyck0ba82992011-08-26 07:45:47 +00005832 igb_write_itr(q_vector);
5833
Alexander Duyck7f081d42010-01-07 17:41:00 +00005834 if (icr & E1000_ICR_DRSTA)
5835 schedule_work(&adapter->reset_task);
5836
Alexander Duyck047e0032009-10-27 15:49:27 +00005837 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005838 /* HW is reporting DMA is out of sync */
5839 adapter->stats.doosync++;
5840 }
5841
Auke Kok9d5c8242008-01-24 02:22:38 -08005842 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5843 hw->mac.get_link_status = 1;
5844 /* guard against interrupt when we're going down */
5845 if (!test_bit(__IGB_DOWN, &adapter->state))
5846 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5847 }
5848
Matthew Vick1f6e8172012-08-18 07:26:33 +00005849 if (icr & E1000_ICR_TS) {
5850 u32 tsicr = rd32(E1000_TSICR);
5851
5852 if (tsicr & E1000_TSICR_TXTS) {
5853 /* acknowledge the interrupt */
5854 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5855 /* retrieve hardware timestamp */
5856 schedule_work(&adapter->ptp_tx_work);
5857 }
5858 }
Matthew Vick1f6e8172012-08-18 07:26:33 +00005859
Alexander Duyck047e0032009-10-27 15:49:27 +00005860 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005861
5862 return IRQ_HANDLED;
5863}
5864
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00005865static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005866{
Alexander Duyck047e0032009-10-27 15:49:27 +00005867 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005868 struct e1000_hw *hw = &adapter->hw;
5869
Alexander Duyck0ba82992011-08-26 07:45:47 +00005870 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5871 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5872 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5873 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005874 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005875 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005876 }
5877
5878 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5879 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005880 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005881 else
5882 igb_irq_enable(adapter);
5883 }
5884}
5885
Auke Kok9d5c8242008-01-24 02:22:38 -08005886/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005887 * igb_poll - NAPI Rx polling callback
5888 * @napi: napi polling structure
5889 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005890 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005891static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005892{
Alexander Duyck047e0032009-10-27 15:49:27 +00005893 struct igb_q_vector *q_vector = container_of(napi,
5894 struct igb_q_vector,
5895 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005896 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005897
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005898#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005899 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5900 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005901#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005902 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005903 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005904
Alexander Duyck0ba82992011-08-26 07:45:47 +00005905 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005906 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005907
Alexander Duyck16eb8812011-08-26 07:43:54 +00005908 /* If all work not completed, return budget and keep polling */
5909 if (!clean_complete)
5910 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005911
Alexander Duyck46544252009-02-19 20:39:04 -08005912 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005913 napi_complete(napi);
5914 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005915
Alexander Duyck16eb8812011-08-26 07:43:54 +00005916 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005917}
Al Viro6d8126f2008-03-16 22:23:24 +00005918
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005919/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005920 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005921 * @q_vector: pointer to q_vector containing needed info
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005922 *
Auke Kok9d5c8242008-01-24 02:22:38 -08005923 * returns true if ring is completely cleaned
5924 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005925static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005926{
Alexander Duyck047e0032009-10-27 15:49:27 +00005927 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005928 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005929 struct igb_tx_buffer *tx_buffer;
Alexander Duyckf4128782012-09-13 06:28:01 +00005930 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005931 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005932 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005933 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005934
Alexander Duyck13fde972011-10-05 13:35:24 +00005935 if (test_bit(__IGB_DOWN, &adapter->state))
5936 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005937
Alexander Duyck06034642011-08-26 07:44:22 +00005938 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005939 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005940 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005941
Alexander Duyckf4128782012-09-13 06:28:01 +00005942 do {
5943 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
Alexander Duyck8542db02011-08-26 07:44:43 +00005944
5945 /* if next_to_watch is not set then there is no work pending */
5946 if (!eop_desc)
5947 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005948
Alexander Duyckf4128782012-09-13 06:28:01 +00005949 /* prevent any other reads prior to eop_desc */
Alexander Duyck70d289b2013-01-08 07:01:03 +00005950 read_barrier_depends();
Alexander Duyckf4128782012-09-13 06:28:01 +00005951
Alexander Duyck13fde972011-10-05 13:35:24 +00005952 /* if DD is not set pending work has not been completed */
5953 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5954 break;
5955
Alexander Duyck8542db02011-08-26 07:44:43 +00005956 /* clear next_to_watch to prevent false hangs */
5957 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005958
Alexander Duyckebe42d12011-08-26 07:45:09 +00005959 /* update the statistics for this packet */
5960 total_bytes += tx_buffer->bytecount;
5961 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005962
Alexander Duyckebe42d12011-08-26 07:45:09 +00005963 /* free the skb */
5964 dev_kfree_skb_any(tx_buffer->skb);
Alexander Duyckebe42d12011-08-26 07:45:09 +00005965
5966 /* unmap skb header data */
5967 dma_unmap_single(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00005968 dma_unmap_addr(tx_buffer, dma),
5969 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00005970 DMA_TO_DEVICE);
5971
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00005972 /* clear tx_buffer data */
5973 tx_buffer->skb = NULL;
5974 dma_unmap_len_set(tx_buffer, len, 0);
5975
Alexander Duyckebe42d12011-08-26 07:45:09 +00005976 /* clear last DMA location and unmap remaining buffers */
5977 while (tx_desc != eop_desc) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005978 tx_buffer++;
5979 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005980 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005981 if (unlikely(!i)) {
5982 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005983 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005984 tx_desc = IGB_TX_DESC(tx_ring, 0);
5985 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005986
5987 /* unmap any remaining paged data */
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00005988 if (dma_unmap_len(tx_buffer, len)) {
Alexander Duyckebe42d12011-08-26 07:45:09 +00005989 dma_unmap_page(tx_ring->dev,
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00005990 dma_unmap_addr(tx_buffer, dma),
5991 dma_unmap_len(tx_buffer, len),
Alexander Duyckebe42d12011-08-26 07:45:09 +00005992 DMA_TO_DEVICE);
Alexander Duyckc9f14bf32012-09-18 01:56:27 +00005993 dma_unmap_len_set(tx_buffer, len, 0);
Alexander Duyckebe42d12011-08-26 07:45:09 +00005994 }
5995 }
5996
Alexander Duyckebe42d12011-08-26 07:45:09 +00005997 /* move us one more past the eop_desc for start of next pkt */
5998 tx_buffer++;
5999 tx_desc++;
6000 i++;
6001 if (unlikely(!i)) {
6002 i -= tx_ring->count;
6003 tx_buffer = tx_ring->tx_buffer_info;
6004 tx_desc = IGB_TX_DESC(tx_ring, 0);
6005 }
Alexander Duyckf4128782012-09-13 06:28:01 +00006006
6007 /* issue prefetch for next Tx descriptor */
6008 prefetch(tx_desc);
6009
6010 /* update budget accounting */
6011 budget--;
6012 } while (likely(budget));
Alexander Duyck0e014cb2008-12-26 01:33:18 -08006013
Eric Dumazetbdbc0632012-01-04 20:23:36 +00006014 netdev_tx_completed_queue(txring_txq(tx_ring),
6015 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00006016 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006017 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00006018 u64_stats_update_begin(&tx_ring->tx_syncp);
6019 tx_ring->tx_stats.bytes += total_bytes;
6020 tx_ring->tx_stats.packets += total_packets;
6021 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006022 q_vector->tx.total_bytes += total_bytes;
6023 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08006024
Alexander Duyck6d095fa2011-08-26 07:46:19 +00006025 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00006026 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00006027
Auke Kok9d5c8242008-01-24 02:22:38 -08006028 /* Detect a transmit hang in hardware; this serializes the
6029 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00006030 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckf4128782012-09-13 06:28:01 +00006031 if (tx_buffer->next_to_watch &&
Alexander Duyck8542db02011-08-26 07:44:43 +00006032 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00006033 (adapter->tx_timeout_factor * HZ)) &&
6034 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006035
Auke Kok9d5c8242008-01-24 02:22:38 -08006036 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00006037 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08006038 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07006039 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08006040 " TDH <%x>\n"
6041 " TDT <%x>\n"
6042 " next_to_use <%x>\n"
6043 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08006044 "buffer_info[next_to_clean]\n"
6045 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00006046 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08006047 " jiffies <%lx>\n"
6048 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07006049 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00006050 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00006051 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08006052 tx_ring->next_to_use,
6053 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00006054 tx_buffer->time_stamp,
Alexander Duyckf4128782012-09-13 06:28:01 +00006055 tx_buffer->next_to_watch,
Auke Kok9d5c8242008-01-24 02:22:38 -08006056 jiffies,
Alexander Duyckf4128782012-09-13 06:28:01 +00006057 tx_buffer->next_to_watch->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00006058 netif_stop_subqueue(tx_ring->netdev,
6059 tx_ring->queue_index);
6060
6061 /* we are about to reset, no point in enabling stuff */
6062 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08006063 }
6064 }
Alexander Duyck13fde972011-10-05 13:35:24 +00006065
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00006066#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
Alexander Duyck13fde972011-10-05 13:35:24 +00006067 if (unlikely(total_packets &&
6068 netif_carrier_ok(tx_ring->netdev) &&
Alexander Duyck21ba6fe2013-02-09 04:27:48 +00006069 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00006070 /* Make sure that anybody stopping the queue after this
6071 * sees the new next_to_clean.
6072 */
6073 smp_mb();
6074 if (__netif_subqueue_stopped(tx_ring->netdev,
6075 tx_ring->queue_index) &&
6076 !(test_bit(__IGB_DOWN, &adapter->state))) {
6077 netif_wake_subqueue(tx_ring->netdev,
6078 tx_ring->queue_index);
6079
6080 u64_stats_update_begin(&tx_ring->tx_syncp);
6081 tx_ring->tx_stats.restart_queue++;
6082 u64_stats_update_end(&tx_ring->tx_syncp);
6083 }
6084 }
6085
6086 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006087}
6088
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006089/**
6090 * igb_reuse_rx_page - page flip buffer and store it back on the ring
6091 * @rx_ring: rx descriptor ring to store buffers on
6092 * @old_buff: donor buffer to have page reused
6093 *
6094 * Synchronizes page for reuse by the adapter
6095 **/
6096static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6097 struct igb_rx_buffer *old_buff)
6098{
6099 struct igb_rx_buffer *new_buff;
6100 u16 nta = rx_ring->next_to_alloc;
6101
6102 new_buff = &rx_ring->rx_buffer_info[nta];
6103
6104 /* update, and store next to alloc */
6105 nta++;
6106 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6107
6108 /* transfer page from old buffer to new buffer */
6109 memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
6110
6111 /* sync the buffer for use by the device */
6112 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
6113 old_buff->page_offset,
Alexander Duyckde78d1f2012-09-25 00:31:12 +00006114 IGB_RX_BUFSZ,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006115 DMA_FROM_DEVICE);
6116}
6117
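/* igb_can_reuse_rx_page - decide whether a half-page Rx buffer can be
 * recycled. Pages from a remote NUMA node are never reused; with 4K pages
 * the offset simply flips between the two halves, while larger pages walk
 * the offset forward until the page is exhausted.
 */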
Alexander Duyck74e238e2013-02-02 05:07:11 +00006118static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6119 struct page *page,
6120 unsigned int truesize)
6121{
6122 /* avoid re-using remote pages */
6123 if (unlikely(page_to_nid(page) != numa_node_id()))
6124 return false;
6125
6126#if (PAGE_SIZE < 8192)
6127 /* if we are only owner of page we can reuse it */
6128 if (unlikely(page_count(page) != 1))
6129 return false;
6130
6131 /* flip page offset to other buffer */
6132 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6133
6134 /* since we are the only owner of the page and we need to
6135 * increment it, just set the value to 2 in order to avoid
6136 * an unnecessary locked operation
6137 */
6138 atomic_set(&page->_count, 2);
6139#else
6140 /* move offset up to the next cache line */
6141 rx_buffer->page_offset += truesize;
6142
6143 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6144 return false;
6145
6146 /* bump ref count on page before it is given to the stack */
6147 get_page(page);
6148#endif
6149
6150 return true;
6151}
6152
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006153/**
6154 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6155 * @rx_ring: rx descriptor ring to transact packets on
6156 * @rx_buffer: buffer containing page to add
6157 * @rx_desc: descriptor containing length of buffer written by hardware
6158 * @skb: sk_buff to place the data into
6159 *
6160 * This function will add the data contained in rx_buffer->page to the skb.
6161 * This is done either through a direct copy if the data in the buffer is
6162 * less than the skb header size, otherwise it will just attach the page as
6163 * a frag to the skb.
6164 *
6165 * The function will then update the page offset if necessary and return
6166 * true if the buffer can be reused by the adapter.
6167 **/
6168static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6169 struct igb_rx_buffer *rx_buffer,
6170 union e1000_adv_rx_desc *rx_desc,
6171 struct sk_buff *skb)
6172{
6173 struct page *page = rx_buffer->page;
6174 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyck74e238e2013-02-02 05:07:11 +00006175#if (PAGE_SIZE < 8192)
6176 unsigned int truesize = IGB_RX_BUFSZ;
6177#else
6178 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
6179#endif
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006180
6181 if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
6182 unsigned char *va = page_address(page) + rx_buffer->page_offset;
6183
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006184 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6185 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6186 va += IGB_TS_HDR_LEN;
6187 size -= IGB_TS_HDR_LEN;
6188 }
6189
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006190 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6191
6192 /* we can reuse buffer as-is, just make sure it is local */
6193 if (likely(page_to_nid(page) == numa_node_id()))
6194 return true;
6195
6196 /* this page cannot be reused so discard it */
6197 put_page(page);
6198 return false;
6199 }
6200
6201 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
Alexander Duyck74e238e2013-02-02 05:07:11 +00006202 rx_buffer->page_offset, size, truesize);
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006203
Alexander Duyck74e238e2013-02-02 05:07:11 +00006204 return igb_can_reuse_rx_page(rx_buffer, page, truesize);
6205}
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006206
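/* igb_build_rx_buffer - wrap an skb directly around the received page data
 * using build_skb(), avoiding a header copy. The buffer is synced for CPU
 * use, any inline Rx timestamp is pulled, and the half page is either
 * recycled onto the ring or unmapped if it cannot be reused.
 */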
Alexander Duyck74e238e2013-02-02 05:07:11 +00006207static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
6208 union e1000_adv_rx_desc *rx_desc)
6209{
6210 struct igb_rx_buffer *rx_buffer;
6211 struct sk_buff *skb;
6212 struct page *page;
6213 void *page_addr;
6214 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckde78d1f2012-09-25 00:31:12 +00006215#if (PAGE_SIZE < 8192)
Alexander Duyck74e238e2013-02-02 05:07:11 +00006216 unsigned int truesize = IGB_RX_BUFSZ;
Alexander Duyckde78d1f2012-09-25 00:31:12 +00006217#else
Alexander Duyck74e238e2013-02-02 05:07:11 +00006218 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
6219 SKB_DATA_ALIGN(NET_SKB_PAD +
6220 NET_IP_ALIGN +
6221 size);
Alexander Duyckde78d1f2012-09-25 00:31:12 +00006222#endif
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006223
Alexander Duyck74e238e2013-02-02 05:07:11 +00006224 /* If we spanned a buffer we have a huge mess so test for it */
6225 BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
6226
Alexander Duyck74e238e2013-02-02 05:07:11 +00006227 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6228 page = rx_buffer->page;
6229 prefetchw(page);
6230
6231 page_addr = page_address(page) + rx_buffer->page_offset;
6232
6233 /* prefetch first cache line of first page */
6234 prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
6235#if L1_CACHE_BYTES < 128
6236 prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
6237#endif
6238
 6239 /* build an skb around the page buffer */
6240 skb = build_skb(page_addr, truesize);
6241 if (unlikely(!skb)) {
6242 rx_ring->rx_stats.alloc_failed++;
6243 return NULL;
6244 }
6245
6246 /* we are reusing so sync this buffer for CPU use */
6247 dma_sync_single_range_for_cpu(rx_ring->dev,
6248 rx_buffer->dma,
6249 rx_buffer->page_offset,
6250 IGB_RX_BUFSZ,
6251 DMA_FROM_DEVICE);
6252
6253 /* update pointers within the skb to store the data */
6254 skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
6255 __skb_put(skb, size);
6256
6257 /* pull timestamp out of packet data */
6258 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6259 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
6260 __skb_pull(skb, IGB_TS_HDR_LEN);
6261 }
6262
6263 if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
6264 /* hand second half of page back to the ring */
6265 igb_reuse_rx_page(rx_ring, rx_buffer);
6266 } else {
6267 /* we are not reusing the buffer so unmap it */
6268 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
6269 PAGE_SIZE, DMA_FROM_DEVICE);
6270 }
6271
6272 /* clear contents of buffer_info */
6273 rx_buffer->dma = 0;
6274 rx_buffer->page = NULL;
6275
6276 return skb;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006277}
6278
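/* igb_fetch_rx_buffer - pull the current Rx buffer into an skb. A small
 * header skb is allocated for the first buffer of a frame; the page data is
 * then copied or attached as a frag by igb_add_rx_frag(), and the page is
 * recycled or unmapped depending on whether it can be reused.
 */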
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006279static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6280 union e1000_adv_rx_desc *rx_desc,
6281 struct sk_buff *skb)
6282{
6283 struct igb_rx_buffer *rx_buffer;
6284 struct page *page;
6285
6286 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6287
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006288 page = rx_buffer->page;
6289 prefetchw(page);
6290
6291 if (likely(!skb)) {
6292 void *page_addr = page_address(page) +
6293 rx_buffer->page_offset;
6294
6295 /* prefetch first cache line of first page */
6296 prefetch(page_addr);
6297#if L1_CACHE_BYTES < 128
6298 prefetch(page_addr + L1_CACHE_BYTES);
6299#endif
6300
6301 /* allocate a skb to store the frags */
6302 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6303 IGB_RX_HDR_LEN);
6304 if (unlikely(!skb)) {
6305 rx_ring->rx_stats.alloc_failed++;
6306 return NULL;
6307 }
6308
6309 /*
6310 * we will be copying header into skb->data in
6311 * pskb_may_pull so it is in our interest to prefetch
6312 * it now to avoid a possible cache miss
6313 */
6314 prefetchw(skb->data);
6315 }
6316
6317 /* we are reusing so sync this buffer for CPU use */
6318 dma_sync_single_range_for_cpu(rx_ring->dev,
6319 rx_buffer->dma,
6320 rx_buffer->page_offset,
Alexander Duyckde78d1f2012-09-25 00:31:12 +00006321 IGB_RX_BUFSZ,
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006322 DMA_FROM_DEVICE);
6323
6324 /* pull page into skb */
6325 if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
6326 /* hand second half of page back to the ring */
6327 igb_reuse_rx_page(rx_ring, rx_buffer);
6328 } else {
6329 /* we are not reusing the buffer so unmap it */
6330 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
6331 PAGE_SIZE, DMA_FROM_DEVICE);
6332 }
6333
6334 /* clear contents of rx_buffer */
6335 rx_buffer->page = NULL;
6336
6337 return skb;
6338}
6339
Alexander Duyckcd392f52011-08-26 07:43:59 +00006340static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006341 union e1000_adv_rx_desc *rx_desc,
6342 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08006343{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07006344 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006345
Alexander Duyck294e7d72011-08-26 07:45:57 +00006346 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006347 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00006348 return;
6349
6350 /* Rx checksum disabled via ethtool */
6351 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08006352 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00006353
Auke Kok9d5c8242008-01-24 02:22:38 -08006354 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006355 if (igb_test_staterr(rx_desc,
6356 E1000_RXDEXT_STATERR_TCPE |
6357 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00006358 /*
6359 * work around errata with sctp packets where the TCPE aka
6360 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
6361 * packets, (aka let the stack check the crc32c)
6362 */
Alexander Duyck866cff02011-08-26 07:45:36 +00006363 if (!((skb->len == 60) &&
6364 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00006365 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00006366 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006367 u64_stats_update_end(&ring->rx_syncp);
6368 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006369 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08006370 return;
6371 }
6372 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006373 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
6374 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08006375 skb->ip_summed = CHECKSUM_UNNECESSARY;
6376
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006377 dev_dbg(ring->dev, "cksum success: bits %08X\n",
6378 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08006379}
6380
Alexander Duyck077887c2011-08-26 07:46:29 +00006381static inline void igb_rx_hash(struct igb_ring *ring,
6382 union e1000_adv_rx_desc *rx_desc,
6383 struct sk_buff *skb)
6384{
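	/* copy the RSS hash computed by hardware into the skb, but only
	 * when the netdev has the NETIF_F_RXHASH feature enabled
	 */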
6385 if (ring->netdev->features & NETIF_F_RXHASH)
6386 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
6387}
6388
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006389/**
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006390 * igb_is_non_eop - process handling of non-EOP buffers
6391 * @rx_ring: Rx ring being processed
6392 * @rx_desc: Rx descriptor for current buffer
6394 *
6395 * This function updates next to clean. If the buffer is an EOP buffer
6396 * this function exits returning false, otherwise it returns true
6397 * indicating that the frame continues in the next buffer.
6399 **/
6400static bool igb_is_non_eop(struct igb_ring *rx_ring,
6401 union e1000_adv_rx_desc *rx_desc)
6402{
6403 u32 ntc = rx_ring->next_to_clean + 1;
6404
6405 /* fetch, update, and store next to clean */
6406 ntc = (ntc < rx_ring->count) ? ntc : 0;
6407 rx_ring->next_to_clean = ntc;
6408
6409 prefetch(IGB_RX_DESC(rx_ring, ntc));
6410
6411 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
6412 return false;
6413
6414 return true;
6415}
6416
6417/**
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006418 * igb_get_headlen - determine size of header for LRO/GRO
6419 * @data: pointer to the start of the headers
6420 * @max_len: total length of section to find headers in
6421 *
6422 * This function is meant to determine the length of headers that will
6423 * be recognized by hardware for LRO, and GRO offloads. The main
6424 * motivation of doing this is to only perform one pull for IPv4 TCP
6425 * packets so that we can do basic things like calculating the gso_size
6426 * based on the average data per packet.
6427 **/
6428static unsigned int igb_get_headlen(unsigned char *data,
6429 unsigned int max_len)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006430{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006431 union {
6432 unsigned char *network;
6433 /* l2 headers */
6434 struct ethhdr *eth;
6435 struct vlan_hdr *vlan;
6436 /* l3 headers */
6437 struct iphdr *ipv4;
6438 struct ipv6hdr *ipv6;
6439 } hdr;
6440 __be16 protocol;
6441 u8 nexthdr = 0; /* default to not TCP */
6442 u8 hlen;
6443
6444 /* this should never happen, but better safe than sorry */
6445 if (max_len < ETH_HLEN)
6446 return max_len;
6447
6448 /* initialize network frame pointer */
6449 hdr.network = data;
6450
6451 /* set first protocol and move network header forward */
6452 protocol = hdr.eth->h_proto;
6453 hdr.network += ETH_HLEN;
6454
6455 /* handle any vlan tag if present */
6456 if (protocol == __constant_htons(ETH_P_8021Q)) {
6457 if ((hdr.network - data) > (max_len - VLAN_HLEN))
6458 return max_len;
6459
6460 protocol = hdr.vlan->h_vlan_encapsulated_proto;
6461 hdr.network += VLAN_HLEN;
6462 }
6463
6464 /* handle L3 protocols */
6465 if (protocol == __constant_htons(ETH_P_IP)) {
6466 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6467 return max_len;
6468
6469 /* access ihl as a u8 to avoid unaligned access on ia64 */
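		/* ihl is the low nibble of the first byte and counts 32-bit
		 * words, so shifting left by 2 converts it to a byte count
		 */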
6470 hlen = (hdr.network[0] & 0x0F) << 2;
6471
6472 /* verify hlen meets minimum size requirements */
6473 if (hlen < sizeof(struct iphdr))
6474 return hdr.network - data;
6475
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006476 /* record next protocol if header is present */
Alexander Duyckb9555f62013-02-01 08:56:47 +00006477 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006478 nexthdr = hdr.ipv4->protocol;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006479 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
6480 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6481 return max_len;
6482
6483 /* record next protocol */
6484 nexthdr = hdr.ipv6->nexthdr;
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006485 hlen = sizeof(struct ipv6hdr);
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006486 } else {
6487 return hdr.network - data;
6488 }
6489
Alexander Duyckf2fb4ab2012-11-13 01:13:38 +00006490 /* relocate pointer to start of L4 header */
6491 hdr.network += hlen;
6492
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006493 /* finally sort out TCP */
6494 if (nexthdr == IPPROTO_TCP) {
6495 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
6496 return max_len;
6497
6498 /* access doff as a u8 to avoid unaligned access on ia64 */
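		/* doff is the high nibble of byte 12 and counts 32-bit words,
		 * so masking with 0xF0 and shifting right by 2 yields bytes
		 */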
6499 hlen = (hdr.network[12] & 0xF0) >> 2;
6500
6501 /* verify hlen meets minimum size requirements */
6502 if (hlen < sizeof(struct tcphdr))
6503 return hdr.network - data;
6504
6505 hdr.network += hlen;
6506 } else if (nexthdr == IPPROTO_UDP) {
6507 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
6508 return max_len;
6509
6510 hdr.network += sizeof(struct udphdr);
6511 }
6512
6513 /*
6514 * If everything has gone correctly hdr.network should be the
6515 * data section of the packet and will be the end of the header.
6516 * If not then it probably represents the end of the last recognized
6517 * header.
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006518 */
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006519 if ((hdr.network - data) < max_len)
6520 return hdr.network - data;
6521 else
6522 return max_len;
6523}
6524
6525/**
6526 * igb_pull_tail - igb specific version of skb_pull_tail
6527 * @rx_ring: rx descriptor ring packet is being transacted on
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006528 * @rx_desc: pointer to the EOP Rx descriptor
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006529 * @skb: pointer to current skb being adjusted
6530 *
6531 * This function is an igb specific version of __pskb_pull_tail. The
6532 * main difference between this version and the original function is that
6533 * this function can make several assumptions about the state of things
6534 * that allow for significant optimizations versus the standard function.
6535 * As a result we can do things like drop a frag and maintain an accurate
6536 * truesize for the skb.
6537 */
6538static void igb_pull_tail(struct igb_ring *rx_ring,
6539 union e1000_adv_rx_desc *rx_desc,
6540 struct sk_buff *skb)
6541{
6542 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
6543 unsigned char *va;
6544 unsigned int pull_len;
6545
6546 /*
6547 * it is valid to use page_address instead of kmap since we are
6548	 * working with pages allocated out of the lowmem pool per
6549 * alloc_page(GFP_ATOMIC)
6550 */
6551 va = skb_frag_address(frag);
6552
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006553 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6554 /* retrieve timestamp from buffer */
6555 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6556
6557 /* update pointers to remove timestamp header */
6558 skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
6559 frag->page_offset += IGB_TS_HDR_LEN;
6560 skb->data_len -= IGB_TS_HDR_LEN;
6561 skb->len -= IGB_TS_HDR_LEN;
6562
6563 /* move va to start of packet data */
6564 va += IGB_TS_HDR_LEN;
6565 }
6566
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006567 /*
6568 * we need the header to contain the greater of either ETH_HLEN or
6569 * 60 bytes if the skb->len is less than 60 for skb_pad.
6570 */
6571 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
6572
6573 /* align pull length to size of long to optimize memcpy performance */
6574 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
6575
6576 /* update all of the pointers */
6577 skb_frag_size_sub(frag, pull_len);
6578 frag->page_offset += pull_len;
6579 skb->data_len -= pull_len;
6580 skb->tail += pull_len;
6581}
6582
6583/**
6584 * igb_cleanup_headers - Correct corrupted or empty headers
6585 * @rx_ring: rx descriptor ring packet is being transacted on
6586 * @rx_desc: pointer to the EOP Rx descriptor
6587 * @skb: pointer to current skb being fixed
6588 *
6589 * Address the case where we are pulling data in on pages only
6590 * and as such no data is present in the skb header.
6591 *
6592 * In addition if skb is not at least 60 bytes we need to pad it so that
6593 * it is large enough to qualify as a valid Ethernet frame.
6594 *
6595 * Returns true if an error was encountered and skb was freed.
6596 **/
6597static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6598 union e1000_adv_rx_desc *rx_desc,
6599 struct sk_buff *skb)
6600{
6601
6602 if (unlikely((igb_test_staterr(rx_desc,
6603 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6604 struct net_device *netdev = rx_ring->netdev;
6605 if (!(netdev->features & NETIF_F_RXALL)) {
6606 dev_kfree_skb_any(skb);
6607 return true;
6608 }
6609 }
6610
6611 /* place header in linear portion of buffer */
6612 if (skb_is_nonlinear(skb))
6613 igb_pull_tail(rx_ring, rx_desc, skb);
6614
6615 /* if skb_pad returns an error the skb was freed */
6616 if (unlikely(skb->len < 60)) {
6617 int pad_len = 60 - skb->len;
6618
6619 if (skb_pad(skb, pad_len))
6620 return true;
6621 __skb_put(skb, pad_len);
6622 }
6623
6624 return false;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006625}
6626
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006627/**
6628 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
6629 * @rx_ring: rx descriptor ring packet is being transacted on
6630 * @rx_desc: pointer to the EOP Rx descriptor
6631 * @skb: pointer to current skb being populated
6632 *
6633 * This function checks the ring, descriptor, and packet information in
6634 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6635 * other fields within the skb.
6636 **/
6637static void igb_process_skb_fields(struct igb_ring *rx_ring,
6638 union e1000_adv_rx_desc *rx_desc,
6639 struct sk_buff *skb)
6640{
6641 struct net_device *dev = rx_ring->netdev;
6642
6643 igb_rx_hash(rx_ring, rx_desc, skb);
6644
6645 igb_rx_checksum(rx_ring, rx_desc, skb);
6646
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006647 igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006648
6649 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
6650 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6651 u16 vid;
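		/* VLAN tags on loopback frames may need a byte swap when the
		 * ring is flagged with IGB_RING_FLAG_RX_LB_VLAN_BSWAP
		 */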
6652 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6653 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6654 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6655 else
6656 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6657
6658 __vlan_hwaccel_put_tag(skb, vid);
6659 }
6660
6661 skb_record_rx_queue(skb, rx_ring->queue_index);
6662
6663 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6664}
6665
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006666static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08006667{
Alexander Duyck0ba82992011-08-26 07:45:47 +00006668 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006669 struct sk_buff *skb = rx_ring->skb;
Auke Kok9d5c8242008-01-24 02:22:38 -08006670 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006671 u16 cleaned_count = igb_desc_unused(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08006672
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006673 do {
6674 union e1000_adv_rx_desc *rx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08006675
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006676 /* return some buffers to hardware, one at a time is too slow */
6677 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
6678 igb_alloc_rx_buffers(rx_ring, cleaned_count);
6679 cleaned_count = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006680 }
6681
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006682 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006683
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006684 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
6685 break;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006686
Alexander Duyck74e238e2013-02-02 05:07:11 +00006687 /* This memory barrier is needed to keep us from reading
6688 * any other fields out of the rx_desc until we know the
6689 * RXD_STAT_DD bit is set
6690 */
6691 rmb();
6692
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006693 /* retrieve a buffer from the ring */
Alexander Duyck74e238e2013-02-02 05:07:11 +00006694 if (ring_uses_build_skb(rx_ring))
6695 skb = igb_build_rx_buffer(rx_ring, rx_desc);
6696 else
6697 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
Alexander Duyck16eb8812011-08-26 07:43:54 +00006698
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006699 /* exit if we failed to retrieve a buffer */
6700 if (!skb)
6701 break;
6702
6703 cleaned_count++;
6704
6705 /* fetch next buffer in frame if non-eop */
6706 if (igb_is_non_eop(rx_ring, rx_desc))
6707 continue;
Alexander Duyck44390ca2011-08-26 07:43:38 +00006708
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006709 /* verify the packet layout is correct */
6710 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
6711 skb = NULL;
6712 continue;
Auke Kok9d5c8242008-01-24 02:22:38 -08006713 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006714
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006715 /* probably a little skewed due to removing CRC */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006716 total_bytes += skb->len;
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006717
Alexander Duyckdb2ee5b2012-09-25 00:30:57 +00006718 /* populate checksum, timestamp, VLAN, and protocol */
6719 igb_process_skb_fields(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006720
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006721 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006722
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006723 /* reset skb pointer */
6724 skb = NULL;
6725
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006726 /* update budget accounting */
6727 total_packets++;
6728 } while (likely(total_packets < budget));
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006729
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006730 /* place incomplete frames back on ring for completion */
6731 rx_ring->skb = skb;
6732
Eric Dumazet12dcd862010-10-15 17:27:10 +00006733 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006734 rx_ring->rx_stats.packets += total_packets;
6735 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006736 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006737 q_vector->rx.total_packets += total_packets;
6738 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006739
6740 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006741 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006742
Alexander Duyck2e334ee2012-09-25 00:31:07 +00006743 return (total_packets < budget);
Auke Kok9d5c8242008-01-24 02:22:38 -08006744}
6745
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006746static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6747 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006748{
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006749 struct page *page = bi->page;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006750 dma_addr_t dma;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006751
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006752 /* since we are recycling buffers we should seldom need to alloc */
6753 if (likely(page))
Alexander Duyckc023cd82011-08-26 07:43:43 +00006754 return true;
6755
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006756 /* alloc new page for storage */
6757 page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
6758 if (unlikely(!page)) {
6759 rx_ring->rx_stats.alloc_failed++;
6760 return false;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006761 }
6762
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006763 /* map page for use */
6764 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006765
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006766 /*
6767 * if mapping failed free memory back to system since
6768 * there isn't much point in holding memory we can't use
6769 */
Alexander Duyckc023cd82011-08-26 07:43:43 +00006770 if (dma_mapping_error(rx_ring->dev, dma)) {
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006771 __free_page(page);
6772
Alexander Duyckc023cd82011-08-26 07:43:43 +00006773 rx_ring->rx_stats.alloc_failed++;
6774 return false;
6775 }
6776
6777 bi->dma = dma;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006778 bi->page = page;
6779 bi->page_offset = 0;
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006780
Alexander Duyckc023cd82011-08-26 07:43:43 +00006781 return true;
6782}
6783
Alexander Duyck74e238e2013-02-02 05:07:11 +00006784static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
6785{
6786 if (ring_uses_build_skb(rx_ring))
6787 return NET_SKB_PAD + NET_IP_ALIGN;
6788 else
6789 return 0;
6790}
6791
Auke Kok9d5c8242008-01-24 02:22:38 -08006792/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006793 * igb_alloc_rx_buffers - Replace used receive buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08006794 * @rx_ring: Rx descriptor ring to refill with new buffers
 * @cleaned_count: number of buffers to allocate
6795 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006796void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006797{
Auke Kok9d5c8242008-01-24 02:22:38 -08006798 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006799 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006800 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006801
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006802 /* nothing to do */
6803 if (!cleaned_count)
6804 return;
6805
Alexander Duyck601369062011-08-26 07:44:05 +00006806 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006807 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006808 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006809
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006810 do {
Alexander Duyck1a1c2252012-09-25 00:30:52 +00006811 if (!igb_alloc_mapped_page(rx_ring, bi))
Alexander Duyckc023cd82011-08-26 07:43:43 +00006812 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006813
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006814 /*
6815 * Refresh the desc even if buffer_addrs didn't change
6816 * because each write-back erases this info.
6817 */
Alexander Duyck74e238e2013-02-02 05:07:11 +00006818 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
6819 bi->page_offset +
6820 igb_rx_offset(rx_ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08006821
Alexander Duyckc023cd82011-08-26 07:43:43 +00006822 rx_desc++;
6823 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006824 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006825 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006826 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006827 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006828 i -= rx_ring->count;
6829 }
6830
6831 /* clear the hdr_addr for the next_to_use descriptor */
6832 rx_desc->read.hdr_addr = 0;
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006833
6834 cleaned_count--;
6835 } while (cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006836
Alexander Duyckc023cd82011-08-26 07:43:43 +00006837 i += rx_ring->count;
6838
Auke Kok9d5c8242008-01-24 02:22:38 -08006839 if (rx_ring->next_to_use != i) {
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006840 /* record the next descriptor to use */
Auke Kok9d5c8242008-01-24 02:22:38 -08006841 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006842
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006843 /* update next to alloc since we have filled the ring */
6844 rx_ring->next_to_alloc = i;
6845
6846 /*
6847 * Force memory writes to complete before letting h/w
Auke Kok9d5c8242008-01-24 02:22:38 -08006848 * know there are new descriptors to fetch. (Only
6849 * applicable for weak-ordered memory model archs,
Alexander Duyckcbc8e552012-09-25 00:31:02 +00006850 * such as IA-64).
6851 */
Auke Kok9d5c8242008-01-24 02:22:38 -08006852 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006853 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006854 }
6855}
6856
6857/**
6858 * igb_mii_ioctl - read or write PHY registers via MII ioctls
6859 * @netdev: network interface device structure
6860 * @ifr: pointer to the ifreq carrying MII register data
6861 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
6862 **/
6863static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6864{
6865 struct igb_adapter *adapter = netdev_priv(netdev);
6866 struct mii_ioctl_data *data = if_mii(ifr);
6867
6868 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6869 return -EOPNOTSUPP;
6870
6871 switch (cmd) {
6872 case SIOCGMIIPHY:
6873 data->phy_id = adapter->hw.phy.addr;
6874 break;
6875 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006876 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6877 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006878 return -EIO;
6879 break;
6880 case SIOCSMIIREG:
6881 default:
6882 return -EOPNOTSUPP;
6883 }
6884 return 0;
6885}
6886
6887/**
6888 * igb_ioctl - dispatch device-specific ioctls
6889 * @netdev: network interface device structure
6890 * @ifr: pointer to the ioctl request data
6891 * @cmd: ioctl command
6892 **/
6893static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6894{
6895 switch (cmd) {
6896 case SIOCGMIIPHY:
6897 case SIOCGMIIREG:
6898 case SIOCSMIIREG:
6899 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006900 case SIOCSHWTSTAMP:
Matthew Vicka79f4f82012-08-10 05:40:44 +00006901 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006902 default:
6903 return -EOPNOTSUPP;
6904 }
6905}
6906
Alexander Duyck009bc062009-07-23 18:08:35 +00006907s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6908{
6909 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00006910
Jiang Liu23d028c2012-08-20 13:32:20 -06006911 if (pcie_capability_read_word(adapter->pdev, reg, value))
Alexander Duyck009bc062009-07-23 18:08:35 +00006912 return -E1000_ERR_CONFIG;
6913
Alexander Duyck009bc062009-07-23 18:08:35 +00006914 return 0;
6915}
6916
6917s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6918{
6919 struct igb_adapter *adapter = hw->back;
Alexander Duyck009bc062009-07-23 18:08:35 +00006920
Jiang Liu23d028c2012-08-20 13:32:20 -06006921 if (pcie_capability_write_word(adapter->pdev, reg, *value))
Alexander Duyck009bc062009-07-23 18:08:35 +00006922 return -E1000_ERR_CONFIG;
6923
Alexander Duyck009bc062009-07-23 18:08:35 +00006924 return 0;
6925}
6926
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006927static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006928{
6929 struct igb_adapter *adapter = netdev_priv(netdev);
6930 struct e1000_hw *hw = &adapter->hw;
6931 u32 ctrl, rctl;
Alexander Duyck5faf0302011-08-26 07:46:08 +00006932 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08006933
Alexander Duyck5faf0302011-08-26 07:46:08 +00006934 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006935 /* enable VLAN tag insert/strip */
6936 ctrl = rd32(E1000_CTRL);
6937 ctrl |= E1000_CTRL_VME;
6938 wr32(E1000_CTRL, ctrl);
6939
Alexander Duyck51466232009-10-27 23:47:35 +00006940 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006941 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006942 rctl &= ~E1000_RCTL_CFIEN;
6943 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006944 } else {
6945 /* disable VLAN tag insert/strip */
6946 ctrl = rd32(E1000_CTRL);
6947 ctrl &= ~E1000_CTRL_VME;
6948 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006949 }
6950
Alexander Duycke1739522009-02-19 20:39:44 -08006951 igb_rlpml_set(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006952}
6953
Jiri Pirko8e586132011-12-08 19:52:37 -05006954static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006955{
6956 struct igb_adapter *adapter = netdev_priv(netdev);
6957 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006958 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006959
Alexander Duyck51466232009-10-27 23:47:35 +00006960 /* attempt to add filter to vlvf array */
6961 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006962
Alexander Duyck51466232009-10-27 23:47:35 +00006963 /* add the filter since PF can receive vlans w/o entry in vlvf */
6964 igb_vfta_set(hw, vid, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006965
6966 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006967
6968 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006969}
6970
Jiri Pirko8e586132011-12-08 19:52:37 -05006971static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006972{
6973 struct igb_adapter *adapter = netdev_priv(netdev);
6974 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006975 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006976 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006977
Alexander Duyck51466232009-10-27 23:47:35 +00006978 /* remove vlan from VLVF table array */
6979 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006980
Alexander Duyck51466232009-10-27 23:47:35 +00006981 /* if vid was not present in VLVF just remove it from table */
6982 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006983 igb_vfta_set(hw, vid, false);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006984
6985 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006986
6987 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006988}
6989
6990static void igb_restore_vlan(struct igb_adapter *adapter)
6991{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006992 u16 vid;
Auke Kok9d5c8242008-01-24 02:22:38 -08006993
Alexander Duyck5faf0302011-08-26 07:46:08 +00006994 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6995
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006996 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6997 igb_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08006998}
6999
David Decotigny14ad2512011-04-27 18:32:43 +00007000int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08007001{
Alexander Duyck090b1792009-10-27 23:51:55 +00007002 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08007003 struct e1000_mac_info *mac = &adapter->hw.mac;
7004
7005 mac->autoneg = 0;
7006
David Decotigny14ad2512011-04-27 18:32:43 +00007007 /* Make sure dplx is at most 1 bit and lsb of speed is not set
7008 * for the switch() below to work */
7009 if ((spd & 1) || (dplx & ~1))
7010 goto err_inval;
7011
Akeem G. Abodunrinf502ef72013-04-05 16:49:06 +00007012	/* Fiber NICs only allow 1000 Mbps full duplex,
7013	 * and 100 Mbps full duplex for 100BaseFX SFPs
7014 */
7015 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
7016 switch (spd + dplx) {
7017 case SPEED_10 + DUPLEX_HALF:
7018 case SPEED_10 + DUPLEX_FULL:
7019 case SPEED_100 + DUPLEX_HALF:
7020 goto err_inval;
7021 default:
7022 break;
7023 }
7024 }
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00007025
David Decotigny14ad2512011-04-27 18:32:43 +00007026 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08007027 case SPEED_10 + DUPLEX_HALF:
7028 mac->forced_speed_duplex = ADVERTISE_10_HALF;
7029 break;
7030 case SPEED_10 + DUPLEX_FULL:
7031 mac->forced_speed_duplex = ADVERTISE_10_FULL;
7032 break;
7033 case SPEED_100 + DUPLEX_HALF:
7034 mac->forced_speed_duplex = ADVERTISE_100_HALF;
7035 break;
7036 case SPEED_100 + DUPLEX_FULL:
7037 mac->forced_speed_duplex = ADVERTISE_100_FULL;
7038 break;
7039 case SPEED_1000 + DUPLEX_FULL:
7040 mac->autoneg = 1;
7041 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
7042 break;
7043 case SPEED_1000 + DUPLEX_HALF: /* not supported */
7044 default:
David Decotigny14ad2512011-04-27 18:32:43 +00007045 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08007046 }
Jesse Brandeburg8376dad2012-07-26 02:31:19 +00007047
7048 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
7049 adapter->hw.phy.mdix = AUTO_ALL_MODES;
7050
Auke Kok9d5c8242008-01-24 02:22:38 -08007051 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00007052
7053err_inval:
7054 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
7055 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08007056}
7057
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007058static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7059 bool runtime)
Auke Kok9d5c8242008-01-24 02:22:38 -08007060{
7061 struct net_device *netdev = pci_get_drvdata(pdev);
7062 struct igb_adapter *adapter = netdev_priv(netdev);
7063 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07007064 u32 ctrl, rctl, status;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007065 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
Auke Kok9d5c8242008-01-24 02:22:38 -08007066#ifdef CONFIG_PM
7067 int retval = 0;
7068#endif
7069
7070 netif_device_detach(netdev);
7071
Alexander Duycka88f10e2008-07-08 15:13:38 -07007072 if (netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007073 __igb_close(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07007074
Alexander Duyck047e0032009-10-27 15:49:27 +00007075 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08007076
7077#ifdef CONFIG_PM
7078 retval = pci_save_state(pdev);
7079 if (retval)
7080 return retval;
7081#endif
7082
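	/* if the link is already up there is no point in waking on a link
	 * change, so drop the link-change bit from the wake-up filter
	 */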
7083 status = rd32(E1000_STATUS);
7084 if (status & E1000_STATUS_LU)
7085 wufc &= ~E1000_WUFC_LNKC;
7086
7087 if (wufc) {
7088 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00007089 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08007090
7091 /* turn on all-multi mode if wake on multicast is enabled */
7092 if (wufc & E1000_WUFC_MC) {
7093 rctl = rd32(E1000_RCTL);
7094 rctl |= E1000_RCTL_MPE;
7095 wr32(E1000_RCTL, rctl);
7096 }
7097
7098 ctrl = rd32(E1000_CTRL);
7099 /* advertise wake from D3Cold */
7100 #define E1000_CTRL_ADVD3WUC 0x00100000
7101 /* phy power management enable */
7102 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
7103 ctrl |= E1000_CTRL_ADVD3WUC;
7104 wr32(E1000_CTRL, ctrl);
7105
Auke Kok9d5c8242008-01-24 02:22:38 -08007106 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00007107 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08007108
7109 wr32(E1000_WUC, E1000_WUC_PME_EN);
7110 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08007111 } else {
7112 wr32(E1000_WUC, 0);
7113 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08007114 }
7115
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007116 *enable_wake = wufc || adapter->en_mng_pt;
7117 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00007118 igb_power_down_link(adapter);
7119 else
7120 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08007121
7122 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7123 * would have already happened in close and is redundant. */
7124 igb_release_hw_control(adapter);
7125
7126 pci_disable_device(pdev);
7127
Auke Kok9d5c8242008-01-24 02:22:38 -08007128 return 0;
7129}
7130
7131#ifdef CONFIG_PM
Emil Tantilovd9dd9662012-01-28 08:10:35 +00007132#ifdef CONFIG_PM_SLEEP
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007133static int igb_suspend(struct device *dev)
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007134{
7135 int retval;
7136 bool wake;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007137 struct pci_dev *pdev = to_pci_dev(dev);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007138
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007139 retval = __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007140 if (retval)
7141 return retval;
7142
7143 if (wake) {
7144 pci_prepare_to_sleep(pdev);
7145 } else {
7146 pci_wake_from_d3(pdev, false);
7147 pci_set_power_state(pdev, PCI_D3hot);
7148 }
7149
7150 return 0;
7151}
Emil Tantilovd9dd9662012-01-28 08:10:35 +00007152#endif /* CONFIG_PM_SLEEP */
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007153
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007154static int igb_resume(struct device *dev)
Auke Kok9d5c8242008-01-24 02:22:38 -08007155{
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007156 struct pci_dev *pdev = to_pci_dev(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08007157 struct net_device *netdev = pci_get_drvdata(pdev);
7158 struct igb_adapter *adapter = netdev_priv(netdev);
7159 struct e1000_hw *hw = &adapter->hw;
7160 u32 err;
7161
7162 pci_set_power_state(pdev, PCI_D0);
7163 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00007164 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09007165
Alexander Duyckaed5dec2009-02-06 23:16:04 +00007166 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08007167 if (err) {
7168 dev_err(&pdev->dev,
7169 "igb: Cannot enable PCI device from suspend\n");
7170 return err;
7171 }
7172 pci_set_master(pdev);
7173
7174 pci_enable_wake(pdev, PCI_D3hot, 0);
7175 pci_enable_wake(pdev, PCI_D3cold, 0);
7176
Stefan Assmann53c7d062012-12-04 06:00:12 +00007177 if (igb_init_interrupt_scheme(adapter, true)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07007178 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7179 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08007180 }
7181
Auke Kok9d5c8242008-01-24 02:22:38 -08007182 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00007183
7184 /* let the f/w know that the h/w is now under the control of the
7185 * driver. */
7186 igb_get_hw_control(adapter);
7187
Auke Kok9d5c8242008-01-24 02:22:38 -08007188 wr32(E1000_WUS, ~0);
7189
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007190 if (netdev->flags & IFF_UP) {
Alexander Duyck0c2cc022012-09-25 00:31:22 +00007191 rtnl_lock();
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007192 err = __igb_open(netdev, true);
Alexander Duyck0c2cc022012-09-25 00:31:22 +00007193 rtnl_unlock();
Alexander Duycka88f10e2008-07-08 15:13:38 -07007194 if (err)
7195 return err;
7196 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007197
7198 netif_device_attach(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007199 return 0;
7200}
7201
7202#ifdef CONFIG_PM_RUNTIME
7203static int igb_runtime_idle(struct device *dev)
7204{
7205 struct pci_dev *pdev = to_pci_dev(dev);
7206 struct net_device *netdev = pci_get_drvdata(pdev);
7207 struct igb_adapter *adapter = netdev_priv(netdev);
7208
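	/* when the link is down, schedule a runtime suspend attempt in five
	 * seconds; returning -EBUSY keeps the device active for now
	 */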
7209 if (!igb_has_link(adapter))
7210 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
7211
7212 return -EBUSY;
7213}
7214
7215static int igb_runtime_suspend(struct device *dev)
7216{
7217 struct pci_dev *pdev = to_pci_dev(dev);
7218 int retval;
7219 bool wake;
7220
7221 retval = __igb_shutdown(pdev, &wake, 1);
7222 if (retval)
7223 return retval;
7224
7225 if (wake) {
7226 pci_prepare_to_sleep(pdev);
7227 } else {
7228 pci_wake_from_d3(pdev, false);
7229 pci_set_power_state(pdev, PCI_D3hot);
7230 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007231
Auke Kok9d5c8242008-01-24 02:22:38 -08007232 return 0;
7233}
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007234
7235static int igb_runtime_resume(struct device *dev)
7236{
7237 return igb_resume(dev);
7238}
7239#endif /* CONFIG_PM_RUNTIME */
Auke Kok9d5c8242008-01-24 02:22:38 -08007240#endif
7241
7242static void igb_shutdown(struct pci_dev *pdev)
7243{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007244 bool wake;
7245
Yan, Zheng749ab2c2012-01-04 20:23:37 +00007246 __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00007247
7248 if (system_state == SYSTEM_POWER_OFF) {
7249 pci_wake_from_d3(pdev, wake);
7250 pci_set_power_state(pdev, PCI_D3hot);
7251 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007252}
7253
Greg Rosefa44f2f2013-01-17 01:03:06 -08007254#ifdef CONFIG_PCI_IOV
7255static int igb_sriov_reinit(struct pci_dev *dev)
7256{
7257 struct net_device *netdev = pci_get_drvdata(dev);
7258 struct igb_adapter *adapter = netdev_priv(netdev);
7259 struct pci_dev *pdev = adapter->pdev;
7260
7261 rtnl_lock();
7262
7263 if (netif_running(netdev))
7264 igb_close(netdev);
7265
7266 igb_clear_interrupt_scheme(adapter);
7267
7268 igb_init_queue_configuration(adapter);
7269
7270 if (igb_init_interrupt_scheme(adapter, true)) {
7271 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7272 return -ENOMEM;
7273 }
7274
7275 if (netif_running(netdev))
7276 igb_open(netdev);
7277
7278 rtnl_unlock();
7279
7280 return 0;
7281}
7282
7283static int igb_pci_disable_sriov(struct pci_dev *dev)
7284{
7285 int err = igb_disable_sriov(dev);
7286
7287 if (!err)
7288 err = igb_sriov_reinit(dev);
7289
7290 return err;
7291}
7292
7293static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
7294{
7295 int err = igb_enable_sriov(dev, num_vfs);
7296
7297 if (err)
7298 goto out;
7299
7300 err = igb_sriov_reinit(dev);
7301 if (!err)
7302 return num_vfs;
7303
7304out:
7305 return err;
7306}
7307
7308#endif
7309static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7310{
7311#ifdef CONFIG_PCI_IOV
7312 if (num_vfs == 0)
7313 return igb_pci_disable_sriov(dev);
7314 else
7315 return igb_pci_enable_sriov(dev, num_vfs);
7316#endif
7317 return 0;
7318}
7319
Auke Kok9d5c8242008-01-24 02:22:38 -08007320#ifdef CONFIG_NET_POLL_CONTROLLER
7321/*
7322 * Polling 'interrupt' - used by things like netconsole to send skbs
7323 * without having to re-enable interrupts. It's not called while
7324 * the interrupt routine is executing.
7325 */
7326static void igb_netpoll(struct net_device *netdev)
7327{
7328 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00007329 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00007330 struct igb_q_vector *q_vector;
Auke Kok9d5c8242008-01-24 02:22:38 -08007331 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08007332
Alexander Duyck047e0032009-10-27 15:49:27 +00007333 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00007334 q_vector = adapter->q_vector[i];
7335 if (adapter->msix_entries)
7336 wr32(E1000_EIMC, q_vector->eims_value);
7337 else
7338 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00007339 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00007340 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007341}
7342#endif /* CONFIG_NET_POLL_CONTROLLER */
7343
7344/**
7345 * igb_io_error_detected - called when PCI error is detected
7346 * @pdev: Pointer to PCI device
7347 * @state: The current pci connection state
7348 *
7349 * This function is called after a PCI bus error affecting
7350 * this device has been detected.
7351 */
7352static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7353 pci_channel_state_t state)
7354{
7355 struct net_device *netdev = pci_get_drvdata(pdev);
7356 struct igb_adapter *adapter = netdev_priv(netdev);
7357
7358 netif_device_detach(netdev);
7359
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00007360 if (state == pci_channel_io_perm_failure)
7361 return PCI_ERS_RESULT_DISCONNECT;
7362
Auke Kok9d5c8242008-01-24 02:22:38 -08007363 if (netif_running(netdev))
7364 igb_down(adapter);
7365 pci_disable_device(pdev);
7366
7367	/* Request a slot reset. */
7368 return PCI_ERS_RESULT_NEED_RESET;
7369}
7370
7371/**
7372 * igb_io_slot_reset - called after the pci bus has been reset.
7373 * @pdev: Pointer to PCI device
7374 *
7375 * Restart the card from scratch, as if from a cold-boot. Implementation
7376 * resembles the first-half of the igb_resume routine.
7377 */
7378static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7379{
7380 struct net_device *netdev = pci_get_drvdata(pdev);
7381 struct igb_adapter *adapter = netdev_priv(netdev);
7382 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08007383 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09007384 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08007385
Alexander Duyckaed5dec2009-02-06 23:16:04 +00007386 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08007387 dev_err(&pdev->dev,
7388 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08007389 result = PCI_ERS_RESULT_DISCONNECT;
7390 } else {
7391 pci_set_master(pdev);
7392 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00007393 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08007394
7395 pci_enable_wake(pdev, PCI_D3hot, 0);
7396 pci_enable_wake(pdev, PCI_D3cold, 0);
7397
7398 igb_reset(adapter);
7399 wr32(E1000_WUS, ~0);
7400 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08007401 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007402
Jeff Kirsherea943d42008-12-11 20:34:19 -08007403 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7404 if (err) {
7405 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
7406 "failed 0x%0x\n", err);
7407 /* non-fatal, continue */
7408 }
Auke Kok9d5c8242008-01-24 02:22:38 -08007409
Alexander Duyck40a914f2008-11-27 00:24:37 -08007410 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08007411}
7412
7413/**
7414 * igb_io_resume - called when traffic can start flowing again.
7415 * @pdev: Pointer to PCI device
7416 *
7417 * This callback is called when the error recovery driver tells us that
7418 * its OK to resume normal operation. Implementation resembles the
7419 * second-half of the igb_resume routine.
7420 */
7421static void igb_io_resume(struct pci_dev *pdev)
7422{
7423 struct net_device *netdev = pci_get_drvdata(pdev);
7424 struct igb_adapter *adapter = netdev_priv(netdev);
7425
Auke Kok9d5c8242008-01-24 02:22:38 -08007426 if (netif_running(netdev)) {
7427 if (igb_up(adapter)) {
7428 dev_err(&pdev->dev, "igb_up failed after reset\n");
7429 return;
7430 }
7431 }
7432
7433 netif_device_attach(netdev);
7434
7435 /* let the f/w know that the h/w is now under the control of the
7436 * driver. */
7437 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08007438}
7439
Alexander Duyck26ad9172009-10-05 06:32:49 +00007440static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7441 u8 qsel)
7442{
7443 u32 rar_low, rar_high;
7444 struct e1000_hw *hw = &adapter->hw;
7445
7446 /* HW expects these in little endian so we reverse the byte order
7447 * from network order (big endian) to little endian
7448 */
7449 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
7450 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
7451 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
7452
7453 /* Indicate to hardware the Address is Valid. */
7454 rar_high |= E1000_RAH_AV;
7455
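	/* 82575 encodes the pool select as a multiplier of the POOL_1 bit,
	 * while later MACs shift POOL_1 by the queue select value
	 */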
7456 if (hw->mac.type == e1000_82575)
7457 rar_high |= E1000_RAH_POOL_1 * qsel;
7458 else
7459 rar_high |= E1000_RAH_POOL_1 << qsel;
7460
7461 wr32(E1000_RAL(index), rar_low);
7462 wrfl();
7463 wr32(E1000_RAH(index), rar_high);
7464 wrfl();
7465}
7466
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007467static int igb_set_vf_mac(struct igb_adapter *adapter,
7468 int vf, unsigned char *mac_addr)
7469{
7470 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00007471	/* VF MAC addresses start at the end of the receive addresses and move
7472	 * towards the first; as a result a collision should not be possible */
7473 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007474
Alexander Duyck37680112009-02-19 20:40:30 -08007475 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007476
Alexander Duyck26ad9172009-10-05 06:32:49 +00007477 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007478
7479 return 0;
7480}
7481
Williams, Mitch A8151d292010-02-10 01:44:24 +00007482static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7483{
7484 struct igb_adapter *adapter = netdev_priv(netdev);
7485 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
7486 return -EINVAL;
7487 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7488 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7489 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
7490 " change effective.");
7491 if (test_bit(__IGB_DOWN, &adapter->state)) {
7492 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
7493 " but the PF device is not up.\n");
7494 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
7495 " attempting to use the VF device.\n");
7496 }
7497 return igb_set_vf_mac(adapter, vf, mac);
7498}
7499
Lior Levy17dc5662011-02-08 02:28:46 +00007500static int igb_link_mbps(int internal_link_speed)
7501{
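	/* translate the SPEED_* constant into a plain Mbps value for the
	 * VF rate-limit math; unknown speeds report 0
	 */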
7502 switch (internal_link_speed) {
7503 case SPEED_100:
7504 return 100;
7505 case SPEED_1000:
7506 return 1000;
7507 default:
7508 return 0;
7509 }
7510}
7511
7512static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7513 int link_speed)
7514{
7515 int rf_dec, rf_int;
7516 u32 bcnrc_val;
7517
7518 if (tx_rate != 0) {
7519 /* Calculate the rate factor values to set */
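		/* rf_int is the integer part of link_speed / tx_rate and
		 * rf_dec is the remainder scaled into the fixed-point
		 * fraction field of RTTBCNRC
		 */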
7520 rf_int = link_speed / tx_rate;
7521 rf_dec = (link_speed - (rf_int * tx_rate));
7522 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
7523
7524 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
7525 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
7526 E1000_RTTBCNRC_RF_INT_MASK);
7527 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7528 } else {
7529 bcnrc_val = 0;
7530 }
7531
7532 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
Lior Levyf00b0da2011-06-04 06:05:03 +00007533 /*
7534 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7535 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7536 */
7537 wr32(E1000_RTTBCNRM, 0x14);
Lior Levy17dc5662011-02-08 02:28:46 +00007538 wr32(E1000_RTTBCNRC, bcnrc_val);
7539}
7540
7541static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7542{
7543 int actual_link_speed, i;
7544 bool reset_rate = false;
7545
7546 /* VF TX rate limit was not set or not supported */
7547 if ((adapter->vf_rate_link_speed == 0) ||
7548 (adapter->hw.mac.type != e1000_82576))
7549 return;
7550
7551 actual_link_speed = igb_link_mbps(adapter->link_speed);
7552 if (actual_link_speed != adapter->vf_rate_link_speed) {
7553 reset_rate = true;
7554 adapter->vf_rate_link_speed = 0;
7555 dev_info(&adapter->pdev->dev,
7556 "Link speed has been changed. VF Transmit "
7557 "rate is disabled\n");
7558 }
7559
7560 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7561 if (reset_rate)
7562 adapter->vf_data[i].tx_rate = 0;
7563
7564 igb_set_vf_rate_limit(&adapter->hw, i,
7565 adapter->vf_data[i].tx_rate,
7566 actual_link_speed);
7567 }
7568}
7569
Williams, Mitch A8151d292010-02-10 01:44:24 +00007570static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7571{
Lior Levy17dc5662011-02-08 02:28:46 +00007572 struct igb_adapter *adapter = netdev_priv(netdev);
7573 struct e1000_hw *hw = &adapter->hw;
7574 int actual_link_speed;
7575
7576 if (hw->mac.type != e1000_82576)
7577 return -EOPNOTSUPP;
7578
7579 actual_link_speed = igb_link_mbps(adapter->link_speed);
7580 if ((vf >= adapter->vfs_allocated_count) ||
7581 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
7582 (tx_rate < 0) || (tx_rate > actual_link_speed))
7583 return -EINVAL;
7584
7585 adapter->vf_rate_link_speed = actual_link_speed;
7586 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
7587 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
7588
7589 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007590}
7591
7592static int igb_ndo_get_vf_config(struct net_device *netdev,
7593 int vf, struct ifla_vf_info *ivi)
7594{
7595 struct igb_adapter *adapter = netdev_priv(netdev);
7596 if (vf >= adapter->vfs_allocated_count)
7597 return -EINVAL;
7598 ivi->vf = vf;
7599 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00007600 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007601 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7602 ivi->qos = adapter->vf_data[vf].pf_qos;
7603 return 0;
7604}
7605
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007606static void igb_vmm_control(struct igb_adapter *adapter)
7607{
7608 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00007609 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007610
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007611 switch (hw->mac.type) {
7612 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00007613 case e1000_i210:
7614 case e1000_i211:
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007615 default:
7616 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007617 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007618 case e1000_82576:
7619 /* notify HW that the MAC is adding vlan tags */
7620 reg = rd32(E1000_DTXCTL);
7621 reg |= E1000_DTXCTL_VLAN_ADDED;
7622 wr32(E1000_DTXCTL, reg);
7623 case e1000_82580:
7624 /* enable replication vlan tag stripping */
7625 reg = rd32(E1000_RPLOLR);
7626 reg |= E1000_RPLOLR_STRVLAN;
7627 wr32(E1000_RPLOLR, reg);
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00007628 case e1000_i350:
7629 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007630 break;
7631 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00007632
Alexander Duyckd4960302009-10-27 15:53:45 +00007633 if (adapter->vfs_allocated_count) {
7634 igb_vmdq_set_loopback_pf(hw, true);
7635 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00007636 igb_vmdq_set_anti_spoofing_pf(hw, true,
7637 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00007638 } else {
7639 igb_vmdq_set_loopback_pf(hw, false);
7640 igb_vmdq_set_replication_pf(hw, false);
7641 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007642}
7643
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007644static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7645{
7646 struct e1000_hw *hw = &adapter->hw;
7647 u32 dmac_thr;
7648 u16 hwm;
7649
7650 if (hw->mac.type > e1000_82580) {
7651 if (adapter->flags & IGB_FLAG_DMAC) {
7652 u32 reg;
7653
7654 /* force threshold to 0. */
7655 wr32(E1000_DMCTXTH, 0);
7656
7657 /*
Matthew Vicke8c626e2011-11-17 08:33:12 +00007658 * DMA Coalescing high water mark needs to be greater
7659 * than the Rx threshold. Set hwm to PBA - max frame
7660 * size in 16B units, capping it at PBA - 6KB.
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007661 */
Matthew Vicke8c626e2011-11-17 08:33:12 +00007662 hwm = 64 * pba - adapter->max_frame_size / 16;
7663 if (hwm < 64 * (pba - 6))
7664 hwm = 64 * (pba - 6);
7665 reg = rd32(E1000_FCRTC);
7666 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
7667 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
7668 & E1000_FCRTC_RTH_COAL_MASK);
7669 wr32(E1000_FCRTC, reg);
7670
7671 /*
7672 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
7673 * frame size, capping it at PBA - 10KB.
7674 */
7675 dmac_thr = pba - adapter->max_frame_size / 512;
7676 if (dmac_thr < pba - 10)
7677 dmac_thr = pba - 10;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007678 reg = rd32(E1000_DMACR);
7679 reg &= ~E1000_DMACR_DMACTHR_MASK;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007680 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
7681 & E1000_DMACR_DMACTHR_MASK);
7682
7683 /* transition to L0x or L1 if available..*/
7684 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
7685
7686 /* watchdog timer= +-1000 usec in 32usec intervals */
7687 reg |= (1000 >> 5);
Matthew Vick0c02dd92012-04-14 05:20:32 +00007688
7689 /* Disable BMC-to-OS Watchdog Enable */
7690 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007691 wr32(E1000_DMACR, reg);
7692
7693 /*
7694 * no lower threshold to disable
7695 * coalescing(smart fifb)-UTRESH=0
7696 */
7697 wr32(E1000_DMCRTRH, 0);
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007698
7699 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
7700
7701 wr32(E1000_DMCTLX, reg);
7702
7703 /*
7704 * free space in tx packet buffer to wake from
7705 * DMA coal
7706 */
7707 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7708 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7709
7710 /*
7711 * make low power state decision controlled
7712 * by DMA coal
7713 */
7714 reg = rd32(E1000_PCIEMISC);
7715 reg &= ~E1000_PCIEMISC_LX_DECISION;
7716 wr32(E1000_PCIEMISC, reg);
7717 } /* endif adapter->dmac is not disabled */
7718 } else if (hw->mac.type == e1000_82580) {
7719 u32 reg = rd32(E1000_PCIEMISC);
7720 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
7721 wr32(E1000_DMACR, 0);
7722 }
7723}
7724
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007725/* igb_read_i2c_byte - Reads 8 bit word over I2C
7726 * @hw: pointer to hardware structure
7727 * @byte_offset: byte offset to read
7728 * @dev_addr: device address
7729 * @data: value read
7730 *
7731 * Performs byte read operation over I2C interface at
7732 * a specified device address.
7733 */
7734s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7735 u8 dev_addr, u8 *data)
7736{
7737 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00007738 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007739 s32 status;
7740 u16 swfw_mask = 0;
7741
7742 if (!this_client)
7743 return E1000_ERR_I2C;
7744
7745 swfw_mask = E1000_SWFW_PHY0_SM;
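	/* the I2C bus is shared with firmware, so hold the PHY0 SW/FW
	 * semaphore for the duration of the SMBus transaction
	 */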
7746
7747 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
7748 != E1000_SUCCESS)
7749 return E1000_ERR_SWFW_SYNC;
7750
7751 status = i2c_smbus_read_byte_data(this_client, byte_offset);
7752 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
7753
7754 if (status < 0)
7755 return E1000_ERR_I2C;
7756 else {
7757 *data = status;
7758 return E1000_SUCCESS;
7759 }
7760}
7761
7762/* igb_write_i2c_byte - Writes 8 bit word over I2C
7763 * @hw: pointer to hardware structure
7764 * @byte_offset: byte offset to write
7765 * @dev_addr: device address
7766 * @data: value to write
7767 *
7768 * Performs byte write operation over I2C interface at
7769 * a specified device address.
7770 */
7771s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7772 u8 dev_addr, u8 data)
7773{
7774 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
Carolyn Wyborny603e86f2013-02-20 07:40:55 +00007775 struct i2c_client *this_client = adapter->i2c_client;
Carolyn Wyborny441fc6f2012-12-07 03:00:30 +00007776 s32 status;
7777 u16 swfw_mask = E1000_SWFW_PHY0_SM;
7778
7779 if (!this_client)
7780 return E1000_ERR_I2C;
7781
7782 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
7783 return E1000_ERR_SWFW_SYNC;
7784 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
7785 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
7786
7787 if (status)
7788 return E1000_ERR_I2C;
7789 else
7790 return E1000_SUCCESS;
7791
7792}
Auke Kok9d5c8242008-01-24 02:22:38 -08007793/* igb_main.c */