/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 4
#define MIN 0
#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
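/* with MAJ = 4, MIN = 0 and BUILD = 1 this expands to "4.0.1-k" */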
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
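/* a negative value makes netif_msg_init() fall back to DEFAULT_MSG_ENABLE */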

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
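/* igb_regdump() prints instances 0-3 of each per-queue register above */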

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
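/* Interleaves indices so a VF's queue pair lands 8 apart:
 * Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1,
 * Q_IDX_82576(3) = 9, ...
 */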
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
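		/* Fall through: the Tx rings, and the Rx rings when no VFs
		 * are in use, take the sequential mapping below.
		 */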
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
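
/* For example, igb_write_ivar(hw, 5, 2, 8) read-modify-writes IVAR0[2]:
 * bits 15:8 are cleared and (5 | E1000_IVAR_VALID) is written there, so the
 * cause mapped to that byte column now fires MSI-X vector 5.
 */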

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
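		/* e.g. rx_queue 10 lands in row 10 & 0x7 = 2 at column
		 * offset (10 & 0x8) << 1 = 16; the matching Tx cause would
		 * sit 8 bits higher at offset 24.
		 */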
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
845 igb_write_ivar(hw, msix_vector,
846 rx_queue >> 1,
847 (rx_queue & 0x1) << 4);
848 if (tx_queue > IGB_N0_QUEUE)
849 igb_write_ivar(hw, msix_vector,
850 tx_queue >> 1,
851 ((tx_queue & 0x1) << 4) + 8);
Alexander Duyck55cac242009-11-19 12:42:21 +0000852 q_vector->eims_value = 1 << msix_vector;
853 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -0700854 default:
855 BUG();
856 break;
857 }
Alexander Duyck26b39272010-02-17 01:00:41 +0000858
859 /* add q_vector eims value to global eims_enable_mask */
860 adapter->eims_enable_mask |= q_vector->eims_value;
861
862 /* configure q_vector to set itr on first interrupt */
863 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -0800864}
865
866/**
867 * igb_configure_msix - Configure MSI-X hardware
868 *
869 * igb_configure_msix sets up the hardware to properly
870 * generate MSI-X interrupts.
871 **/
872static void igb_configure_msix(struct igb_adapter *adapter)
873{
874 u32 tmp;
875 int i, vector = 0;
876 struct e1000_hw *hw = &adapter->hw;
877
878 adapter->eims_enable_mask = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -0800879
880 /* set vector for other causes, i.e. link changes */
Alexander Duyck2d064c02008-07-08 15:10:12 -0700881 switch (hw->mac.type) {
882 case e1000_82575:
Auke Kok9d5c8242008-01-24 02:22:38 -0800883 tmp = rd32(E1000_CTRL_EXT);
884 /* enable MSI-X PBA support*/
885 tmp |= E1000_CTRL_EXT_PBA_CLR;
886
887 /* Auto-Mask interrupts upon ICR read. */
888 tmp |= E1000_CTRL_EXT_EIAME;
889 tmp |= E1000_CTRL_EXT_IRCA;
890
891 wr32(E1000_CTRL_EXT, tmp);
Alexander Duyck047e0032009-10-27 15:49:27 +0000892
893 /* enable msix_other interrupt */
894 array_wr32(E1000_MSIXBM(0), vector++,
895 E1000_EIMS_OTHER);
PJ Waskiewicz844290e2008-06-27 11:00:39 -0700896 adapter->eims_other = E1000_EIMS_OTHER;
Auke Kok9d5c8242008-01-24 02:22:38 -0800897
Alexander Duyck2d064c02008-07-08 15:10:12 -0700898 break;
899
900 case e1000_82576:
Alexander Duyck55cac242009-11-19 12:42:21 +0000901 case e1000_82580:
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +0000902 case e1000_i350:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +0000903 case e1000_i210:
904 case e1000_i211:
Alexander Duyck047e0032009-10-27 15:49:27 +0000905 /* Turn on MSI-X capability first, or our settings
906 * won't stick. And it will take days to debug. */
907 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
908 E1000_GPIE_PBA | E1000_GPIE_EIAME |
909 E1000_GPIE_NSICR);
Alexander Duyck2d064c02008-07-08 15:10:12 -0700910
Alexander Duyck047e0032009-10-27 15:49:27 +0000911 /* enable msix_other interrupt */
912 adapter->eims_other = 1 << vector;
913 tmp = (vector++ | E1000_IVAR_VALID) << 8;
914
915 wr32(E1000_IVAR_MISC, tmp);
Alexander Duyck2d064c02008-07-08 15:10:12 -0700916 break;
917 default:
918 /* do nothing, since nothing else supports MSI-X */
919 break;
920 } /* switch (hw->mac.type) */
Alexander Duyck047e0032009-10-27 15:49:27 +0000921
922 adapter->eims_enable_mask |= adapter->eims_other;
923
Alexander Duyck26b39272010-02-17 01:00:41 +0000924 for (i = 0; i < adapter->num_q_vectors; i++)
925 igb_assign_vector(adapter->q_vector[i], vector++);
Alexander Duyck047e0032009-10-27 15:49:27 +0000926
Auke Kok9d5c8242008-01-24 02:22:38 -0800927 wrfl();
928}
929
930/**
931 * igb_request_msix - Initialize MSI-X interrupts
932 *
933 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
934 * kernel.
935 **/
936static int igb_request_msix(struct igb_adapter *adapter)
937{
938 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +0000939 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -0800940 int i, err = 0, vector = 0;
941
Auke Kok9d5c8242008-01-24 02:22:38 -0800942 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchesa0607fd2009-11-18 23:29:17 -0800943 igb_msix_other, 0, netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -0800944 if (err)
945 goto out;
Alexander Duyck047e0032009-10-27 15:49:27 +0000946 vector++;
947
948 for (i = 0; i < adapter->num_q_vectors; i++) {
949 struct igb_q_vector *q_vector = adapter->q_vector[i];
950
951 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
952
Alexander Duyck0ba82992011-08-26 07:45:47 +0000953 if (q_vector->rx.ring && q_vector->tx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000954 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000955 q_vector->rx.ring->queue_index);
956 else if (q_vector->tx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000957 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000958 q_vector->tx.ring->queue_index);
959 else if (q_vector->rx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000960 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000961 q_vector->rx.ring->queue_index);
Alexander Duyck047e0032009-10-27 15:49:27 +0000962 else
963 sprintf(q_vector->name, "%s-unused", netdev->name);
964
965 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchesa0607fd2009-11-18 23:29:17 -0800966 igb_msix_ring, 0, q_vector->name,
Alexander Duyck047e0032009-10-27 15:49:27 +0000967 q_vector);
968 if (err)
969 goto out;
970 vector++;
971 }
Auke Kok9d5c8242008-01-24 02:22:38 -0800972
Auke Kok9d5c8242008-01-24 02:22:38 -0800973 igb_configure_msix(adapter);
974 return 0;
975out:
976 return err;
977}
978
979static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
980{
981 if (adapter->msix_entries) {
982 pci_disable_msix(adapter->pdev);
983 kfree(adapter->msix_entries);
984 adapter->msix_entries = NULL;
Alexander Duyck047e0032009-10-27 15:49:27 +0000985 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
Auke Kok9d5c8242008-01-24 02:22:38 -0800986 pci_disable_msi(adapter->pdev);
Alexander Duyck047e0032009-10-27 15:49:27 +0000987 }
Auke Kok9d5c8242008-01-24 02:22:38 -0800988}
989
Alexander Duyck047e0032009-10-27 15:49:27 +0000990/**
991 * igb_free_q_vectors - Free memory allocated for interrupt vectors
992 * @adapter: board private structure to initialize
993 *
994 * This function frees the memory allocated to the q_vectors. In addition if
995 * NAPI is enabled it will delete any references to the NAPI struct prior
996 * to freeing the q_vector.
997 **/
998static void igb_free_q_vectors(struct igb_adapter *adapter)
999{
1000 int v_idx;
1001
1002 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1003 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1004 adapter->q_vector[v_idx] = NULL;
Nick Nunleyfe0592b2010-02-17 01:05:35 +00001005 if (!q_vector)
1006 continue;
Alexander Duyck047e0032009-10-27 15:49:27 +00001007 netif_napi_del(&q_vector->napi);
1008 kfree(q_vector);
1009 }
1010 adapter->num_q_vectors = 0;
1011}
1012
1013/**
1014 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1015 *
1016 * This function resets the device so that it has 0 rx queues, tx queues, and
1017 * MSI-X interrupts allocated.
1018 */
1019static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1020{
1021 igb_free_queues(adapter);
1022 igb_free_q_vectors(adapter);
1023 igb_reset_interrupt_capability(adapter);
1024}
Auke Kok9d5c8242008-01-24 02:22:38 -08001025
1026/**
1027 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1028 *
1029 * Attempt to configure interrupts using the best available
1030 * capabilities of the hardware and kernel.
1031 **/
Ben Hutchings21adef32010-09-27 08:28:39 +00001032static int igb_set_interrupt_capability(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08001033{
1034 int err;
1035 int numvecs, i;
1036
Alexander Duyck83b71802009-02-06 23:15:45 +00001037 /* Number of supported queues. */
Alexander Duycka99955f2009-11-12 18:37:19 +00001038 adapter->num_rx_queues = adapter->rss_queues;
Greg Rose5fa85172010-07-01 13:38:16 +00001039 if (adapter->vfs_allocated_count)
1040 adapter->num_tx_queues = 1;
1041 else
1042 adapter->num_tx_queues = adapter->rss_queues;
Alexander Duyck83b71802009-02-06 23:15:45 +00001043
Alexander Duyck047e0032009-10-27 15:49:27 +00001044 /* start with one vector for every rx queue */
1045 numvecs = adapter->num_rx_queues;
1046
Daniel Mack3ad2f3f2010-02-03 08:01:28 +08001047 /* if tx handler is separate add 1 for every tx queue */
Alexander Duycka99955f2009-11-12 18:37:19 +00001048 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1049 numvecs += adapter->num_tx_queues;
Alexander Duyck047e0032009-10-27 15:49:27 +00001050
1051 /* store the number of vectors reserved for queues */
1052 adapter->num_q_vectors = numvecs;
1053
1054 /* add 1 vector for link status interrupts */
1055 numvecs++;
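	/* e.g. 4 RSS queues with unpaired Tx handlers yields
	 * 4 Rx + 4 Tx + 1 link vector = 9 MSI-X entries
	 */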
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	rtnl_lock();
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	rtnl_unlock();
	return err;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

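	/* With enough vectors every ring gets its own; otherwise Tx ring i
	 * shares the vector of Rx ring i.
	 */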
1200 if (adapter->num_q_vectors >=
1201 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1202 for (i = 0; i < adapter->num_rx_queues; i++)
1203 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1204 for (i = 0; i < adapter->num_tx_queues; i++)
1205 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1206 } else {
1207 for (i = 0; i < adapter->num_rx_queues; i++) {
1208 if (i < adapter->num_tx_queues)
1209 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1210 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1211 }
1212 for (; i < adapter->num_tx_queues; i++)
1213 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1214 }
1215 return 0;
1216}
1217
1218/**
1219 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1220 *
1221 * This function initializes the interrupts and allocates all of the queues.
1222 **/
1223static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1224{
1225 struct pci_dev *pdev = adapter->pdev;
1226 int err;
1227
Ben Hutchings21adef32010-09-27 08:28:39 +00001228 err = igb_set_interrupt_capability(adapter);
1229 if (err)
1230 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001231
1232 err = igb_alloc_q_vectors(adapter);
1233 if (err) {
1234 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1235 goto err_alloc_q_vectors;
1236 }
1237
1238 err = igb_alloc_queues(adapter);
1239 if (err) {
1240 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1241 goto err_alloc_queues;
1242 }
1243
1244 err = igb_map_ring_to_vector(adapter);
1245 if (err) {
1246 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1247 goto err_map_queues;
1248 }
 1249
1251 return 0;
1252err_map_queues:
1253 igb_free_queues(adapter);
1254err_alloc_queues:
1255 igb_free_q_vectors(adapter);
1256err_alloc_q_vectors:
1257 igb_reset_interrupt_capability(adapter);
1258 return err;
1259}
1260
1261/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001262 * igb_request_irq - initialize interrupts
 1263 * @adapter: board private structure
1264 * Attempts to configure interrupts using the best available
1265 * capabilities of the hardware and kernel.
1266 **/
1267static int igb_request_irq(struct igb_adapter *adapter)
1268{
1269 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001270 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001271 int err = 0;
1272
1273 if (adapter->msix_entries) {
1274 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001275 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001276 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001277 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001278 igb_clear_interrupt_scheme(adapter);
Alexander Duyckc74d5882011-08-26 07:46:45 +00001279 if (!pci_enable_msi(pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001280 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001281 igb_free_all_tx_resources(adapter);
1282 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001283 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001284 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001285 adapter->num_q_vectors = 1;
1286 err = igb_alloc_q_vectors(adapter);
1287 if (err) {
1288 dev_err(&pdev->dev,
1289 "Unable to allocate memory for vectors\n");
1290 goto request_done;
1291 }
1292 err = igb_alloc_queues(adapter);
1293 if (err) {
1294 dev_err(&pdev->dev,
1295 "Unable to allocate memory for queues\n");
1296 igb_free_q_vectors(adapter);
1297 goto request_done;
1298 }
1299 igb_setup_all_tx_resources(adapter);
1300 igb_setup_all_rx_resources(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001301 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001302
Alexander Duyckc74d5882011-08-26 07:46:45 +00001303 igb_assign_vector(adapter->q_vector[0], 0);
1304
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001305 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001306 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001307 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001308 if (!err)
1309 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001310
Auke Kok9d5c8242008-01-24 02:22:38 -08001311 /* fall back to legacy interrupts */
1312 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001313 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001314 }
1315
Alexander Duyckc74d5882011-08-26 07:46:45 +00001316 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001317 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001318
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001319 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001320 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001321 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001322
1323request_done:
1324 return err;
1325}
1326
1327static void igb_free_irq(struct igb_adapter *adapter)
1328{
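	/* with MSI-X, vector 0 is the "other"/link interrupt registered
	 * against the adapter itself in igb_request_msix(); the remaining
	 * vectors map 1:1 to the q_vectors, hence the order below.
	 */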
Auke Kok9d5c8242008-01-24 02:22:38 -08001329 if (adapter->msix_entries) {
1330 int vector = 0, i;
1331
Alexander Duyck047e0032009-10-27 15:49:27 +00001332 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001333
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001334 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001335 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001336 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001337 } else {
1338 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001339 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001340}
1341
1342/**
1343 * igb_irq_disable - Mask off interrupt generation on the NIC
1344 * @adapter: board private structure
1345 **/
1346static void igb_irq_disable(struct igb_adapter *adapter)
1347{
1348 struct e1000_hw *hw = &adapter->hw;
1349
Alexander Duyck25568a52009-10-27 23:49:59 +00001350 /*
1351 * we need to be careful when disabling interrupts. The VFs are also
 1352 * mapped into these registers, so clearing the bits can cause
 1353 * issues for the VF drivers; we therefore clear only the bits we set
1354 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001355 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001356 u32 regval = rd32(E1000_EIAM);
1357 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1358 wr32(E1000_EIMC, adapter->eims_enable_mask);
1359 regval = rd32(E1000_EIAC);
1360 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001361 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001362
1363 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001364 wr32(E1000_IMC, ~0);
1365 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001366 if (adapter->msix_entries) {
1367 int i;
1368 for (i = 0; i < adapter->num_q_vectors; i++)
1369 synchronize_irq(adapter->msix_entries[i].vector);
1370 } else {
1371 synchronize_irq(adapter->pdev->irq);
1372 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001373}
1374
1375/**
1376 * igb_irq_enable - Enable default interrupt generation settings
1377 * @adapter: board private structure
1378 **/
1379static void igb_irq_enable(struct igb_adapter *adapter)
1380{
1381 struct e1000_hw *hw = &adapter->hw;
1382
1383 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001384 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001385 u32 regval = rd32(E1000_EIAC);
1386 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1387 regval = rd32(E1000_EIAM);
1388 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001389 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001390 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001391 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001392 ims |= E1000_IMS_VMMB;
1393 }
1394 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001395 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001396 wr32(E1000_IMS, IMS_ENABLE_MASK |
1397 E1000_IMS_DRSTA);
1398 wr32(E1000_IAM, IMS_ENABLE_MASK |
1399 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001400 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001401}
1402
1403static void igb_update_mng_vlan(struct igb_adapter *adapter)
1404{
Alexander Duyck51466232009-10-27 23:47:35 +00001405 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001406 u16 vid = adapter->hw.mng_cookie.vlan_id;
1407 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001408
Alexander Duyck51466232009-10-27 23:47:35 +00001409 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1410 /* add VID to filter table */
1411 igb_vfta_set(hw, vid, true);
1412 adapter->mng_vlan_id = vid;
1413 } else {
1414 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1415 }
1416
1417 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1418 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001419 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001420 /* remove VID from filter table */
1421 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001422 }
1423}
1424
1425/**
1426 * igb_release_hw_control - release control of the h/w to f/w
1427 * @adapter: address of board private structure
1428 *
1429 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1430 * For ASF and Pass Through versions of f/w this means that the
1431 * driver is no longer loaded.
1432 *
1433 **/
1434static void igb_release_hw_control(struct igb_adapter *adapter)
1435{
1436 struct e1000_hw *hw = &adapter->hw;
1437 u32 ctrl_ext;
1438
1439 /* Let firmware take over control of h/w */
1440 ctrl_ext = rd32(E1000_CTRL_EXT);
1441 wr32(E1000_CTRL_EXT,
1442 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1443}
1444
Auke Kok9d5c8242008-01-24 02:22:38 -08001445/**
1446 * igb_get_hw_control - get control of the h/w from f/w
1447 * @adapter: address of board private structure
1448 *
1449 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1450 * For ASF and Pass Through versions of f/w this means that
1451 * the driver is loaded.
1452 *
1453 **/
1454static void igb_get_hw_control(struct igb_adapter *adapter)
1455{
1456 struct e1000_hw *hw = &adapter->hw;
1457 u32 ctrl_ext;
1458
1459 /* Let firmware know the driver has taken over */
1460 ctrl_ext = rd32(E1000_CTRL_EXT);
1461 wr32(E1000_CTRL_EXT,
1462 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1463}
1464
Auke Kok9d5c8242008-01-24 02:22:38 -08001465/**
1466 * igb_configure - configure the hardware for RX and TX
1467 * @adapter: private board structure
1468 **/
1469static void igb_configure(struct igb_adapter *adapter)
1470{
1471 struct net_device *netdev = adapter->netdev;
1472 int i;
1473
1474 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001475 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001476
1477 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001478
Alexander Duyck85b430b2009-10-27 15:50:29 +00001479 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001480 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001481 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001482
1483 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001484 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001485
1486 igb_rx_fifo_flush_82575(&adapter->hw);
1487
Alexander Duyckc493ea42009-03-20 00:16:50 +00001488 /* call igb_desc_unused() which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001489 * at least one descriptor unused to make sure
 1490 * next_to_use != next_to_clean */
1491 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001492 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001493 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001494 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001495}
1496
Nick Nunley88a268c2010-02-17 01:01:59 +00001497/**
1498 * igb_power_up_link - Power up the phy/serdes link
1499 * @adapter: address of board private structure
1500 **/
1501void igb_power_up_link(struct igb_adapter *adapter)
1502{
Akeem G. Abodunrin76886592012-07-17 04:51:18 +00001503 igb_reset_phy(&adapter->hw);
1504
Nick Nunley88a268c2010-02-17 01:01:59 +00001505 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1506 igb_power_up_phy_copper(&adapter->hw);
1507 else
1508 igb_power_up_serdes_link_82575(&adapter->hw);
1509}
1510
1511/**
1512 * igb_power_down_link - Power down the phy/serdes link
1513 * @adapter: address of board private structure
1514 */
1515static void igb_power_down_link(struct igb_adapter *adapter)
1516{
1517 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1518 igb_power_down_phy_copper_82575(&adapter->hw);
1519 else
1520 igb_shutdown_serdes_link_82575(&adapter->hw);
1521}
Auke Kok9d5c8242008-01-24 02:22:38 -08001522
1523/**
1524 * igb_up - Open the interface and prepare it to handle traffic
1525 * @adapter: board private structure
1526 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001527int igb_up(struct igb_adapter *adapter)
1528{
1529 struct e1000_hw *hw = &adapter->hw;
1530 int i;
1531
1532 /* hardware has been reset, we need to reload some things */
1533 igb_configure(adapter);
1534
1535 clear_bit(__IGB_DOWN, &adapter->state);
1536
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001537 for (i = 0; i < adapter->num_q_vectors; i++)
1538 napi_enable(&(adapter->q_vector[i]->napi));
1539
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001540 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001541 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001542 else
1543 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001544
1545 /* Clear any pending interrupts. */
1546 rd32(E1000_ICR);
1547 igb_irq_enable(adapter);
1548
Alexander Duyckd4960302009-10-27 15:53:45 +00001549 /* notify VFs that reset has been completed */
1550 if (adapter->vfs_allocated_count) {
1551 u32 reg_data = rd32(E1000_CTRL_EXT);
1552 reg_data |= E1000_CTRL_EXT_PFRSTD;
1553 wr32(E1000_CTRL_EXT, reg_data);
1554 }
1555
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001556 netif_tx_start_all_queues(adapter->netdev);
1557
Alexander Duyck25568a52009-10-27 23:49:59 +00001558 /* start the watchdog. */
1559 hw->mac.get_link_status = 1;
1560 schedule_work(&adapter->watchdog_task);
1561
Auke Kok9d5c8242008-01-24 02:22:38 -08001562 return 0;
1563}
1564
1565void igb_down(struct igb_adapter *adapter)
1566{
Auke Kok9d5c8242008-01-24 02:22:38 -08001567 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001568 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001569 u32 tctl, rctl;
1570 int i;
1571
1572 /* signal that we're down so the interrupt handler does not
1573 * reschedule our watchdog timer */
1574 set_bit(__IGB_DOWN, &adapter->state);
1575
1576 /* disable receives in the hardware */
1577 rctl = rd32(E1000_RCTL);
1578 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1579 /* flush and sleep below */
1580
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001581 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001582
1583 /* disable transmits in the hardware */
1584 tctl = rd32(E1000_TCTL);
1585 tctl &= ~E1000_TCTL_EN;
1586 wr32(E1000_TCTL, tctl);
1587 /* flush both disables and wait for them to finish */
1588 wrfl();
1589 msleep(10);
1590
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001591 for (i = 0; i < adapter->num_q_vectors; i++)
1592 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001593
Auke Kok9d5c8242008-01-24 02:22:38 -08001594 igb_irq_disable(adapter);
1595
1596 del_timer_sync(&adapter->watchdog_timer);
1597 del_timer_sync(&adapter->phy_info_timer);
1598
Auke Kok9d5c8242008-01-24 02:22:38 -08001599 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001600
1601 /* record the stats before reset*/
Eric Dumazet12dcd862010-10-15 17:27:10 +00001602 spin_lock(&adapter->stats64_lock);
1603 igb_update_stats(adapter, &adapter->stats64);
1604 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001605
Auke Kok9d5c8242008-01-24 02:22:38 -08001606 adapter->link_speed = 0;
1607 adapter->link_duplex = 0;
1608
Jeff Kirsher30236822008-06-24 17:01:15 -07001609 if (!pci_channel_offline(adapter->pdev))
1610 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001611 igb_clean_all_tx_rings(adapter);
1612 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001613#ifdef CONFIG_IGB_DCA
1614
1615 /* since we reset the hardware DCA settings were cleared */
1616 igb_setup_dca(adapter);
1617#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001618}
1619
1620void igb_reinit_locked(struct igb_adapter *adapter)
1621{
1622 WARN_ON(in_interrupt());
1623 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1624 msleep(1);
1625 igb_down(adapter);
1626 igb_up(adapter);
1627 clear_bit(__IGB_RESETTING, &adapter->state);
1628}
1629
1630void igb_reset(struct igb_adapter *adapter)
1631{
Alexander Duyck090b1792009-10-27 23:51:55 +00001632 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001633 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001634 struct e1000_mac_info *mac = &hw->mac;
1635 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001636 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1637 u16 hwm;
1638
 1639 /* Repartition the PBA for MTUs greater than 9k.
 1640 * CTRL.RST is required for the change to take effect.
1641 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001642 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001643 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001644 case e1000_82580:
1645 pba = rd32(E1000_RXPBS);
1646 pba = igb_rxpbs_adjust_82580(pba);
1647 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001648 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001649 pba = rd32(E1000_RXPBS);
1650 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001651 break;
1652 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001653 case e1000_i210:
1654 case e1000_i211:
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001655 default:
1656 pba = E1000_PBA_34K;
1657 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001658 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001659
Alexander Duyck2d064c02008-07-08 15:10:12 -07001660 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1661 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001662 /* adjust PBA for jumbo frames */
1663 wr32(E1000_PBA, pba);
1664
1665 /* To maintain wire speed transmits, the Tx FIFO should be
1666 * large enough to accommodate two full transmit packets,
1667 * rounded up to the next 1KB and expressed in KB. Likewise,
1668 * the Rx FIFO should be large enough to accommodate at least
1669 * one full receive packet and is similarly rounded up and
1670 * expressed in KB. */
1671 pba = rd32(E1000_PBA);
1672 /* upper 16 bits has Tx packet buffer allocation size in KB */
1673 tx_space = pba >> 16;
1674 /* lower 16 bits has Rx packet buffer allocation size in KB */
1675 pba &= 0xffff;
 1676 /* the Tx FIFO also stores 16 bytes of information about the packet,
 1677 * but doesn't include the Ethernet FCS because hardware appends it */
1678 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001679 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001680 ETH_FCS_LEN) * 2;
1681 min_tx_space = ALIGN(min_tx_space, 1024);
1682 min_tx_space >>= 10;
1683 /* software strips receive CRC, so leave room for it */
1684 min_rx_space = adapter->max_frame_size;
1685 min_rx_space = ALIGN(min_rx_space, 1024);
1686 min_rx_space >>= 10;
1687
1688 /* If current Tx allocation is less than the min Tx FIFO size,
1689 * and the min Tx FIFO size is less than the current Rx FIFO
1690 * allocation, take space away from current Rx allocation */
1691 if (tx_space < min_tx_space &&
1692 ((min_tx_space - tx_space) < pba)) {
1693 pba = pba - (min_tx_space - tx_space);
1694
1695 /* if short on rx space, rx wins and must trump tx
1696 * adjustment */
1697 if (pba < min_rx_space)
1698 pba = min_rx_space;
1699 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001700 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001701 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001702
1703 /* flow control settings */
1704 /* The high water mark must be low enough to fit one full frame
1705 * (or the size used for early receive) above it in the Rx FIFO.
1706 * Set it to the lower of:
1707 * - 90% of the Rx FIFO size, or
1708 * - the full Rx FIFO size minus one full frame */
1709 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001710 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001711
Alexander Duyckd405ea32009-12-23 13:21:27 +00001712 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1713 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001714 fc->pause_time = 0xFFFF;
1715 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001716 fc->current_mode = fc->requested_mode;
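	/* worked example, assuming the default 34KB PBA of an 82575 and a
	 * 1522-byte max frame: hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
	 * = min(31334, 31772) = 31334, giving high_water = 31328 after the
	 * 16-byte alignment, and low_water = 31312.
	 */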
Auke Kok9d5c8242008-01-24 02:22:38 -08001717
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001718 /* disable receive for all VFs and wait one second */
1719 if (adapter->vfs_allocated_count) {
1720 int i;
1721 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001722 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001723
1724 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001725 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001726
1727 /* disable transmits and receives */
1728 wr32(E1000_VFRE, 0);
1729 wr32(E1000_VFTE, 0);
1730 }
1731
Auke Kok9d5c8242008-01-24 02:22:38 -08001732 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001733 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001734 wr32(E1000_WUC, 0);
1735
Alexander Duyck330a6d62009-10-27 23:51:35 +00001736 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001737 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001738
Matthew Vicka27416b2012-04-18 02:57:44 +00001739 /*
1740 * Flow control settings reset on hardware reset, so guarantee flow
1741 * control is off when forcing speed.
1742 */
1743 if (!hw->mac.autoneg)
1744 igb_force_mac_fc(hw);
1745
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001746 igb_init_dmac(adapter, pba);
Nick Nunley88a268c2010-02-17 01:01:59 +00001747 if (!netif_running(adapter->netdev))
1748 igb_power_down_link(adapter);
1749
Auke Kok9d5c8242008-01-24 02:22:38 -08001750 igb_update_mng_vlan(adapter);
1751
1752 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1753 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1754
Alexander Duyck330a6d62009-10-27 23:51:35 +00001755 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001756}
1757
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001758static netdev_features_t igb_fix_features(struct net_device *netdev,
1759 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001760{
1761 /*
 1762 * Since there is no support for separate Rx/Tx VLAN accel
 1763 * enable/disable, make sure the Tx flag is always in the same state as Rx.
1764 */
1765 if (features & NETIF_F_HW_VLAN_RX)
1766 features |= NETIF_F_HW_VLAN_TX;
1767 else
1768 features &= ~NETIF_F_HW_VLAN_TX;
1769
1770 return features;
1771}
1772
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001773static int igb_set_features(struct net_device *netdev,
1774 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001775{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001776 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001777 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001778
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001779 if (changed & NETIF_F_HW_VLAN_RX)
1780 igb_vlan_mode(netdev, features);
1781
Ben Greear89eaefb2012-03-06 09:41:58 +00001782 if (!(changed & NETIF_F_RXALL))
1783 return 0;
1784
1785 netdev->features = features;
1786
1787 if (netif_running(netdev))
1788 igb_reinit_locked(adapter);
1789 else
1790 igb_reset(adapter);
1791
Michał Mirosławac52caa2011-06-08 08:38:01 +00001792 return 0;
1793}
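
/* Usage sketch: a userspace toggle such as "ethtool -K eth0 rxvlan off"
 * lands here via ndo_set_features; igb_fix_features() above will already
 * have mirrored the Tx VLAN flag, so both are switched together by
 * igb_vlan_mode().
 */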
1794
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001795static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001796 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001797 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001798 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001799 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001800 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001801 .ndo_set_mac_address = igb_set_mac,
1802 .ndo_change_mtu = igb_change_mtu,
1803 .ndo_do_ioctl = igb_ioctl,
1804 .ndo_tx_timeout = igb_tx_timeout,
1805 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001806 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1807 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001808 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1809 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1810 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1811 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001812#ifdef CONFIG_NET_POLL_CONTROLLER
1813 .ndo_poll_controller = igb_netpoll,
1814#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001815 .ndo_fix_features = igb_fix_features,
1816 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001817};
1818
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001819/**
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00001820 * igb_set_fw_version - Configure version string for ethtool
1821 * @adapter: adapter struct
1822 *
1823 **/
1824void igb_set_fw_version(struct igb_adapter *adapter)
1825{
1826 struct e1000_hw *hw = &adapter->hw;
1827 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
1828 u16 major, build, patch, fw_version;
1829 u32 etrack_id;
1830
1831 hw->nvm.ops.read(hw, 5, 1, &fw_version);
1832 if (adapter->hw.mac.type != e1000_i211) {
1833 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1834 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1835 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1836
1837 /* combo image version needs to be found */
1838 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1839 if ((comb_offset != 0x0) &&
1840 (comb_offset != IGB_NVM_VER_INVALID)) {
1841 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1842 + 1), 1, &comb_verh);
1843 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1844 1, &comb_verl);
1845
1846 /* Only display Option Rom if it exists and is valid */
1847 if ((comb_verh && comb_verl) &&
1848 ((comb_verh != IGB_NVM_VER_INVALID) &&
1849 (comb_verl != IGB_NVM_VER_INVALID))) {
1850 major = comb_verl >> IGB_COMB_VER_SHFT;
1851 build = (comb_verl << IGB_COMB_VER_SHFT) |
1852 (comb_verh >> IGB_COMB_VER_SHFT);
1853 patch = comb_verh & IGB_COMB_VER_MASK;
1854 snprintf(adapter->fw_version,
1855 sizeof(adapter->fw_version),
1856 "%d.%d%d, 0x%08x, %d.%d.%d",
1857 (fw_version & IGB_MAJOR_MASK) >>
1858 IGB_MAJOR_SHIFT,
1859 (fw_version & IGB_MINOR_MASK) >>
1860 IGB_MINOR_SHIFT,
1861 (fw_version & IGB_BUILD_MASK),
1862 etrack_id, major, build, patch);
1863 goto out;
1864 }
1865 }
1866 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1867 "%d.%d%d, 0x%08x",
1868 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1869 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1870 (fw_version & IGB_BUILD_MASK), etrack_id);
1871 } else {
1872 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1873 "%d.%d%d",
1874 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1875 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1876 (fw_version & IGB_BUILD_MASK));
1877 }
1878out:
1879 return;
1880}
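
/* Resulting strings (values illustrative only): a part with a valid combo
 * image yields e.g. "1.25, 0x800004e2, 1.304.0" (NVM map version, eTrack
 * ID, option ROM version); without one, "1.25, 0x800004e2"; an i211
 * reports just the NVM map version, e.g. "1.25".
 */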
1881
1882/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001883 * igb_probe - Device Initialization Routine
1884 * @pdev: PCI device information struct
1885 * @ent: entry in igb_pci_tbl
1886 *
1887 * Returns 0 on success, negative on failure
1888 *
1889 * igb_probe initializes an adapter identified by a pci_dev structure.
1890 * The OS initialization, configuring of the adapter private structure,
1891 * and a hardware reset occur.
1892 **/
1893static int __devinit igb_probe(struct pci_dev *pdev,
1894 const struct pci_device_id *ent)
1895{
1896 struct net_device *netdev;
1897 struct igb_adapter *adapter;
1898 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001899 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001900 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001901 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001902 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1903 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001904 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001905 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001906 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001907
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001908 /* Catch broken hardware that put the wrong VF device ID in
1909 * the PCIe SR-IOV capability.
1910 */
1911 if (pdev->is_virtfn) {
 1912 WARN(1, "%s (%hx:%hx) should not be a VF!\n",
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001913 pci_name(pdev), pdev->vendor, pdev->device);
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001914 return -EINVAL;
1915 }
1916
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001917 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001918 if (err)
1919 return err;
1920
1921 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001922 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001923 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001924 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001925 if (!err)
1926 pci_using_dac = 1;
1927 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001928 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001929 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001930 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001931 if (err) {
1932 dev_err(&pdev->dev, "No usable DMA "
1933 "configuration, aborting\n");
1934 goto err_dma;
1935 }
1936 }
1937 }
1938
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001939 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1940 IORESOURCE_MEM),
1941 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001942 if (err)
1943 goto err_pci_reg;
1944
Frans Pop19d5afd2009-10-02 10:04:12 -07001945 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001946
Auke Kok9d5c8242008-01-24 02:22:38 -08001947 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001948 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001949
1950 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001951 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001952 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001953 if (!netdev)
1954 goto err_alloc_etherdev;
1955
1956 SET_NETDEV_DEV(netdev, &pdev->dev);
1957
1958 pci_set_drvdata(pdev, netdev);
1959 adapter = netdev_priv(netdev);
1960 adapter->netdev = netdev;
1961 adapter->pdev = pdev;
1962 hw = &adapter->hw;
1963 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00001964 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9d5c8242008-01-24 02:22:38 -08001965
1966 mmio_start = pci_resource_start(pdev, 0);
1967 mmio_len = pci_resource_len(pdev, 0);
1968
1969 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001970 hw->hw_addr = ioremap(mmio_start, mmio_len);
1971 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001972 goto err_ioremap;
1973
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001974 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001975 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001976 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001977
1978 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1979
1980 netdev->mem_start = mmio_start;
1981 netdev->mem_end = mmio_start + mmio_len;
1982
Auke Kok9d5c8242008-01-24 02:22:38 -08001983 /* PCI config space info */
1984 hw->vendor_id = pdev->vendor;
1985 hw->device_id = pdev->device;
1986 hw->revision_id = pdev->revision;
1987 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1988 hw->subsystem_device_id = pdev->subsystem_device;
1989
Auke Kok9d5c8242008-01-24 02:22:38 -08001990 /* Copy the default MAC, PHY and NVM function pointers */
1991 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1992 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1993 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1994 /* Initialize skew-specific constants */
1995 err = ei->get_invariants(hw);
1996 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001997 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001998
Alexander Duyck450c87c2009-02-06 23:22:11 +00001999 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08002000 err = igb_sw_init(adapter);
2001 if (err)
2002 goto err_sw_init;
2003
2004 igb_get_bus_info_pcie(hw);
2005
2006 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08002007
2008 /* Copper options */
2009 if (hw->phy.media_type == e1000_media_type_copper) {
2010 hw->phy.mdix = AUTO_ALL_MODES;
2011 hw->phy.disable_polarity_correction = false;
2012 hw->phy.ms_type = e1000_ms_hw_default;
2013 }
2014
2015 if (igb_check_reset_block(hw))
2016 dev_info(&pdev->dev,
2017 "PHY reset is blocked due to SOL/IDER session.\n");
2018
Alexander Duyck077887c2011-08-26 07:46:29 +00002019 /*
2020 * features is initialized to 0 in allocation, it might have bits
2021 * set by igb_sw_init so we should use an or instead of an
2022 * assignment.
2023 */
2024 netdev->features |= NETIF_F_SG |
2025 NETIF_F_IP_CSUM |
2026 NETIF_F_IPV6_CSUM |
2027 NETIF_F_TSO |
2028 NETIF_F_TSO6 |
2029 NETIF_F_RXHASH |
2030 NETIF_F_RXCSUM |
2031 NETIF_F_HW_VLAN_RX |
2032 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002033
Alexander Duyck077887c2011-08-26 07:46:29 +00002034 /* copy netdev features into list of user selectable features */
2035 netdev->hw_features |= netdev->features;
Ben Greear89eaefb2012-03-06 09:41:58 +00002036 netdev->hw_features |= NETIF_F_RXALL;
Auke Kok9d5c8242008-01-24 02:22:38 -08002037
Alexander Duyck077887c2011-08-26 07:46:29 +00002038 /* set this bit last since it cannot be part of hw_features */
2039 netdev->features |= NETIF_F_HW_VLAN_FILTER;
2040
2041 netdev->vlan_features |= NETIF_F_TSO |
2042 NETIF_F_TSO6 |
2043 NETIF_F_IP_CSUM |
2044 NETIF_F_IPV6_CSUM |
2045 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002046
Ben Greear6b8f0922012-03-06 09:41:53 +00002047 netdev->priv_flags |= IFF_SUPP_NOFCS;
2048
Yi Zou7b872a52010-09-22 17:57:58 +00002049 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002050 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00002051 netdev->vlan_features |= NETIF_F_HIGHDMA;
2052 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002053
Michał Mirosławac52caa2011-06-08 08:38:01 +00002054 if (hw->mac.type >= e1000_82576) {
2055 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002056 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002057 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002058
Jiri Pirko01789342011-08-16 06:29:00 +00002059 netdev->priv_flags |= IFF_UNICAST_FLT;
2060
Alexander Duyck330a6d62009-10-27 23:51:35 +00002061 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002062
2063 /* before reading the NVM, reset the controller to put the device in a
2064 * known good starting state */
2065 hw->mac.ops.reset_hw(hw);
2066
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002067 /*
 2068 * make sure the NVM is good; i211 parts have special NVM that
2069 * doesn't contain a checksum
2070 */
2071 if (hw->mac.type != e1000_i211) {
2072 if (hw->nvm.ops.validate(hw) < 0) {
2073 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2074 err = -EIO;
2075 goto err_eeprom;
2076 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002077 }
2078
2079 /* copy the MAC address out of the NVM */
2080 if (hw->mac.ops.read_mac_addr(hw))
2081 dev_err(&pdev->dev, "NVM Read Error\n");
2082
2083 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2084 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2085
2086 if (!is_valid_ether_addr(netdev->perm_addr)) {
2087 dev_err(&pdev->dev, "Invalid MAC Address\n");
2088 err = -EIO;
2089 goto err_eeprom;
2090 }
2091
Carolyn Wybornyd67974f2012-06-14 16:04:19 +00002092 /* get firmware version for ethtool -i */
2093 igb_set_fw_version(adapter);
2094
Joe Perchesc061b182010-08-23 18:20:03 +00002095 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002096 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002097 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002098 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002099
2100 INIT_WORK(&adapter->reset_task, igb_reset_task);
2101 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2102
Alexander Duyck450c87c2009-02-06 23:22:11 +00002103 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002104 adapter->fc_autoneg = true;
2105 hw->mac.autoneg = true;
2106 hw->phy.autoneg_advertised = 0x2f;
2107
Alexander Duyck0cce1192009-07-23 18:10:24 +00002108 hw->fc.requested_mode = e1000_fc_default;
2109 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002110
Auke Kok9d5c8242008-01-24 02:22:38 -08002111 igb_validate_mdi_setting(hw);
2112
Auke Kok9d5c8242008-01-24 02:22:38 -08002113 /* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
2114 * enable the ACPI Magic Packet filter
2115 */
2116
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002117 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002118 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002119 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002120 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2121 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2122 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002123 else if (hw->bus.func == 1)
2124 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002125
2126 if (eeprom_data & eeprom_apme_mask)
2127 adapter->eeprom_wol |= E1000_WUFC_MAG;
2128
2129 /* now that we have the eeprom settings, apply the special cases where
2130 * the eeprom may be wrong or the board simply won't support wake on
2131 * lan on a particular port */
2132 switch (pdev->device) {
2133 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2134 adapter->eeprom_wol = 0;
2135 break;
2136 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002137 case E1000_DEV_ID_82576_FIBER:
2138 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002139 /* Wake events only supported on port A for dual fiber
2140 * regardless of eeprom setting */
2141 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2142 adapter->eeprom_wol = 0;
2143 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002144 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002145 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002146 /* if quad port adapter, disable WoL on all but port A */
2147 if (global_quad_port_a != 0)
2148 adapter->eeprom_wol = 0;
2149 else
2150 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2151 /* Reset for multiple quad port adapters */
2152 if (++global_quad_port_a == 4)
2153 global_quad_port_a = 0;
2154 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002155 }
2156
2157 /* initialize the wol settings based on the eeprom settings */
2158 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002159 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002160
2161 /* reset the hardware with the new settings */
2162 igb_reset(adapter);
2163
2164 /* let the f/w know that the h/w is now under the control of the
2165 * driver. */
2166 igb_get_hw_control(adapter);
2167
Auke Kok9d5c8242008-01-24 02:22:38 -08002168 strcpy(netdev->name, "eth%d");
2169 err = register_netdev(netdev);
2170 if (err)
2171 goto err_register;
2172
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002173 /* carrier off reporting is important to ethtool even BEFORE open */
2174 netif_carrier_off(netdev);
2175
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002176#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002177 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002178 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002179 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002180 igb_setup_dca(adapter);
2181 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002182
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002183#endif
Richard Cochran7ebae812012-03-16 10:55:37 +00002184#ifdef CONFIG_IGB_PTP
Anders Berggren673b8b72011-02-04 07:32:32 +00002185 /* do hw tstamp init after resetting */
Richard Cochran7ebae812012-03-16 10:55:37 +00002186 igb_ptp_init(adapter);
Anders Berggren673b8b72011-02-04 07:32:32 +00002187
Richard Cochran7ebae812012-03-16 10:55:37 +00002188#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08002189 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2190 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002191 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002192 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002193 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002194 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002195 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002196 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2197 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2198 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2199 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002200 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002201
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002202 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2203 if (ret_val)
2204 strcpy(part_str, "Unknown");
2205 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002206 dev_info(&pdev->dev,
2207 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2208 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002209 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002210 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002211 switch (hw->mac.type) {
2212 case e1000_i350:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002213 case e1000_i210:
2214 case e1000_i211:
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002215 igb_set_eee_i350(hw);
2216 break;
2217 default:
2218 break;
2219 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002220
2221 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002222 return 0;
2223
2224err_register:
2225 igb_release_hw_control(adapter);
2226err_eeprom:
2227 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002228 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002229
2230 if (hw->flash_address)
2231 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002232err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002233 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002234 iounmap(hw->hw_addr);
2235err_ioremap:
2236 free_netdev(netdev);
2237err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002238 pci_release_selected_regions(pdev,
2239 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002240err_pci_reg:
2241err_dma:
2242 pci_disable_device(pdev);
2243 return err;
2244}
2245
2246/**
2247 * igb_remove - Device Removal Routine
2248 * @pdev: PCI device information struct
2249 *
2250 * igb_remove is called by the PCI subsystem to alert the driver
 2251 * that it should release a PCI device. This could be caused by a
2252 * Hot-Plug event, or because the driver is going to be removed from
2253 * memory.
2254 **/
2255static void __devexit igb_remove(struct pci_dev *pdev)
2256{
2257 struct net_device *netdev = pci_get_drvdata(pdev);
2258 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002259 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002260
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002261 pm_runtime_get_noresume(&pdev->dev);
Richard Cochran7ebae812012-03-16 10:55:37 +00002262#ifdef CONFIG_IGB_PTP
2263 igb_ptp_remove(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002264
Richard Cochran7ebae812012-03-16 10:55:37 +00002265#endif
Tejun Heo760141a2010-12-12 16:45:14 +01002266 /*
2267 * The watchdog timer may be rescheduled, so explicitly
 2268 * prevent the watchdog from being rescheduled.
2269 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002270 set_bit(__IGB_DOWN, &adapter->state);
2271 del_timer_sync(&adapter->watchdog_timer);
2272 del_timer_sync(&adapter->phy_info_timer);
2273
Tejun Heo760141a2010-12-12 16:45:14 +01002274 cancel_work_sync(&adapter->reset_task);
2275 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002276
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002277#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002278 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002279 dev_info(&pdev->dev, "DCA disabled\n");
2280 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002281 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002282 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002283 }
2284#endif
2285
Auke Kok9d5c8242008-01-24 02:22:38 -08002286 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2287 * would have already happened in close and is redundant. */
2288 igb_release_hw_control(adapter);
2289
2290 unregister_netdev(netdev);
2291
Alexander Duyck047e0032009-10-27 15:49:27 +00002292 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002293
Alexander Duyck37680112009-02-19 20:40:30 -08002294#ifdef CONFIG_PCI_IOV
2295 /* reclaim resources allocated to VFs */
2296 if (adapter->vf_data) {
2297 /* disable iov and allow time for transactions to clear */
Greg Rose0224d662011-10-14 02:57:14 +00002298 if (!igb_check_vf_assignment(adapter)) {
2299 pci_disable_sriov(pdev);
2300 msleep(500);
2301 } else {
2302 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2303 }
Alexander Duyck37680112009-02-19 20:40:30 -08002304
2305 kfree(adapter->vf_data);
2306 adapter->vf_data = NULL;
2307 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002308 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002309 msleep(100);
2310 dev_info(&pdev->dev, "IOV Disabled\n");
2311 }
2312#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002313
Alexander Duyck28b07592009-02-06 23:20:31 +00002314 iounmap(hw->hw_addr);
2315 if (hw->flash_address)
2316 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002317 pci_release_selected_regions(pdev,
2318 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002319
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002320 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002321 free_netdev(netdev);
2322
Frans Pop19d5afd2009-10-02 10:04:12 -07002323 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002324
Auke Kok9d5c8242008-01-24 02:22:38 -08002325 pci_disable_device(pdev);
2326}
2327
2328/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002329 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2330 * @adapter: board private structure to initialize
2331 *
 2332 * This function initializes the VF-specific data storage and then attempts to
Alexander Duycka6b623e2009-10-27 23:47:53 +00002333 * allocate the VFs. It is ordered this way because it is much more
 2334 * expensive, time-wise, to disable SR-IOV than it is to allocate and free
2335 * the memory for the VFs.
2336 **/
2337static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2338{
2339#ifdef CONFIG_PCI_IOV
2340 struct pci_dev *pdev = adapter->pdev;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002341 struct e1000_hw *hw = &adapter->hw;
Greg Rose0224d662011-10-14 02:57:14 +00002342 int old_vfs = igb_find_enabled_vfs(adapter);
2343 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002344
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002345 /* Virtualization features not supported on i210 family. */
2346 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2347 return;
2348
Greg Rose0224d662011-10-14 02:57:14 +00002349 if (old_vfs) {
 2350 dev_info(&pdev->dev, "%d pre-allocated VFs found - overriding "
2351 "max_vfs setting of %d\n", old_vfs, max_vfs);
2352 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002353 }
2354
Greg Rose0224d662011-10-14 02:57:14 +00002355 if (!adapter->vfs_allocated_count)
2356 return;
2357
2358 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2359 sizeof(struct vf_data_storage), GFP_KERNEL);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002360
Greg Rose0224d662011-10-14 02:57:14 +00002361 /* if allocation failed then we do not support SR-IOV */
2362 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002363 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002364 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2365 "Data Storage\n");
2366 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002367 }
Greg Rose0224d662011-10-14 02:57:14 +00002368
2369 if (!old_vfs) {
2370 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2371 goto err_out;
2372 }
2373 dev_info(&pdev->dev, "%d VFs allocated\n",
2374 adapter->vfs_allocated_count);
2375 for (i = 0; i < adapter->vfs_allocated_count; i++)
2376 igb_vf_configure(adapter, i);
2377
2378 /* DMA Coalescing is not supported in IOV mode. */
2379 adapter->flags &= ~IGB_FLAG_DMAC;
2380 goto out;
2381err_out:
2382 kfree(adapter->vf_data);
2383 adapter->vf_data = NULL;
2384 adapter->vfs_allocated_count = 0;
2385out:
2386 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002387#endif /* CONFIG_PCI_IOV */
2388}
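
/* VFs are requested at module load time, e.g. "modprobe igb max_vfs=7";
 * igb_sw_init() caps the count at 7 per PF on 82576/i350 hardware.
 */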
2389
Alexander Duyck115f4592009-11-12 18:37:00 +00002390/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002391 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2392 * @adapter: board private structure to initialize
2393 *
2394 * igb_sw_init initializes the Adapter private data structure.
2395 * Fields are initialized based on PCI device information and
2396 * OS network device settings (MTU size).
2397 **/
2398static int __devinit igb_sw_init(struct igb_adapter *adapter)
2399{
2400 struct e1000_hw *hw = &adapter->hw;
2401 struct net_device *netdev = adapter->netdev;
2402 struct pci_dev *pdev = adapter->pdev;
Matthew Vick374a5422012-05-18 04:54:58 +00002403 u32 max_rss_queues;
Auke Kok9d5c8242008-01-24 02:22:38 -08002404
2405 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2406
Alexander Duyck13fde972011-10-05 13:35:24 +00002407 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002408 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2409 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002410
2411 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002412 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2413 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2414
Alexander Duyck13fde972011-10-05 13:35:24 +00002415 /* set default work limits */
2416 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2417
Alexander Duyck153285f2011-08-26 07:43:32 +00002418 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2419 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002420 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2421
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002422 adapter->node = -1;
2423
Eric Dumazet12dcd862010-10-15 17:27:10 +00002424 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002425#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002426 switch (hw->mac.type) {
2427 case e1000_82576:
2428 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002429 if (max_vfs > 7) {
2430 dev_warn(&pdev->dev,
2431 "Maximum of 7 VFs per PF, using max\n");
2432 adapter->vfs_allocated_count = 7;
2433 } else
2434 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002435 break;
2436 default:
2437 break;
2438 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002439#endif /* CONFIG_PCI_IOV */
Matthew Vick374a5422012-05-18 04:54:58 +00002440
2441 /* Determine the maximum number of RSS queues supported. */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002442 switch (hw->mac.type) {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002443 case e1000_i211:
Matthew Vick374a5422012-05-18 04:54:58 +00002444 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002445 break;
Matthew Vick374a5422012-05-18 04:54:58 +00002446 case e1000_82575:
2447 case e1000_i210:
2448 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2449 break;
2450 case e1000_i350:
2451 /* I350 cannot do RSS and SR-IOV at the same time */
2452 if (!!adapter->vfs_allocated_count) {
2453 max_rss_queues = 1;
2454 break;
2455 }
2456 /* fall through */
2457 case e1000_82576:
2458 if (!!adapter->vfs_allocated_count) {
2459 max_rss_queues = 2;
2460 break;
2461 }
2462 /* fall through */
2463 case e1000_82580:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002464 default:
Matthew Vick374a5422012-05-18 04:54:58 +00002465 max_rss_queues = IGB_MAX_RX_QUEUES;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002466 break;
2467 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002468
Matthew Vick374a5422012-05-18 04:54:58 +00002469 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2470
2471 /* Determine if we need to pair queues. */
2472 switch (hw->mac.type) {
2473 case e1000_82575:
2474 case e1000_i211:
2475 /* Device supports enough interrupts without queue pairing. */
2476 break;
2477 case e1000_82576:
2478 /*
2479 * If VFs are going to be allocated with RSS queues then we
2480 * should pair the queues in order to conserve interrupts due
2481 * to limited supply.
2482 */
2483 if ((adapter->rss_queues > 1) &&
2484 (adapter->vfs_allocated_count > 6))
2485 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2486 /* fall through */
2487 case e1000_82580:
2488 case e1000_i350:
2489 case e1000_i210:
2490 default:
2491 /*
2492 * If rss_queues > half of max_rss_queues, pair the queues in
2493 * order to conserve interrupts due to limited supply.
2494 */
2495 if (adapter->rss_queues > (max_rss_queues / 2))
2496 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2497 break;
2498 }
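	/* e.g. an i350 with no VFs and 8 online CPUs: max_rss_queues is
	 * IGB_MAX_RX_QUEUES (assumed 8 here), rss_queues becomes 8, which
	 * exceeds half of max_rss_queues, so IGB_FLAG_QUEUE_PAIRS is set and
	 * each q_vector services one Tx and one Rx ring from one interrupt.
	 */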
Alexander Duycka99955f2009-11-12 18:37:19 +00002499
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002500 /* Setup and initialize a copy of the hw vlan table array */
2501 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2502 E1000_VLAN_FILTER_TBL_SIZE,
2503 GFP_ATOMIC);
2504
Alexander Duycka6b623e2009-10-27 23:47:53 +00002505 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002506 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002507 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2508 return -ENOMEM;
2509 }
2510
Alexander Duycka6b623e2009-10-27 23:47:53 +00002511 igb_probe_vfs(adapter);
2512
Auke Kok9d5c8242008-01-24 02:22:38 -08002513 /* Explicitly disable IRQ since the NIC can be in any state. */
2514 igb_irq_disable(adapter);
2515
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002516 if (hw->mac.type >= e1000_i350)
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002517 adapter->flags &= ~IGB_FLAG_DMAC;
2518
Auke Kok9d5c8242008-01-24 02:22:38 -08002519 set_bit(__IGB_DOWN, &adapter->state);
2520 return 0;
2521}
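/*
 * Usage sketch (assuming the driver's max_vfs module parameter, which
 * feeds the CONFIG_PCI_IOV block at the top of this function):
 * enabling SR-IOV at load time looks like
 *
 *	modprobe igb max_vfs=7
 *
 * Requests above 7 are clamped to 7 VFs per PF by that block.
 */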
2522
2523/**
2524 * igb_open - Called when a network interface is made active
2525 * @netdev: network interface device structure
2526 *
2527 * Returns 0 on success, negative value on failure
2528 *
2529 * The open entry point is called when a network interface is made
2530 * active by the system (IFF_UP). At this point all resources needed
2531 * for transmit and receive operations are allocated, the interrupt
2532 * handler is registered with the OS, the watchdog timer is started,
2533 * and the stack is notified that the interface is ready.
2534 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002535static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08002536{
2537 struct igb_adapter *adapter = netdev_priv(netdev);
2538 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002539 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002540 int err;
2541 int i;
2542
2543 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002544 if (test_bit(__IGB_TESTING, &adapter->state)) {
2545 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08002546 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002547 }
2548
2549 if (!resuming)
2550 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002551
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002552 netif_carrier_off(netdev);
2553
Auke Kok9d5c8242008-01-24 02:22:38 -08002554 /* allocate transmit descriptors */
2555 err = igb_setup_all_tx_resources(adapter);
2556 if (err)
2557 goto err_setup_tx;
2558
2559 /* allocate receive descriptors */
2560 err = igb_setup_all_rx_resources(adapter);
2561 if (err)
2562 goto err_setup_rx;
2563
Nick Nunley88a268c2010-02-17 01:01:59 +00002564 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002565
Auke Kok9d5c8242008-01-24 02:22:38 -08002566 /* before we allocate an interrupt, we must be ready to handle it.
2567 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 2568 * as soon as we call pci_request_irq, so we have to set up our
2569 * clean_rx handler before we do so. */
2570 igb_configure(adapter);
2571
2572 err = igb_request_irq(adapter);
2573 if (err)
2574 goto err_req_irq;
2575
2576 /* From here on the code is the same as igb_up() */
2577 clear_bit(__IGB_DOWN, &adapter->state);
2578
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002579 for (i = 0; i < adapter->num_q_vectors; i++)
2580 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002581
2582 /* Clear any pending interrupts. */
2583 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002584
2585 igb_irq_enable(adapter);
2586
Alexander Duyckd4960302009-10-27 15:53:45 +00002587 /* notify VFs that reset has been completed */
2588 if (adapter->vfs_allocated_count) {
2589 u32 reg_data = rd32(E1000_CTRL_EXT);
2590 reg_data |= E1000_CTRL_EXT_PFRSTD;
2591 wr32(E1000_CTRL_EXT, reg_data);
2592 }
2593
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002594 netif_tx_start_all_queues(netdev);
2595
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002596 if (!resuming)
2597 pm_runtime_put(&pdev->dev);
2598
Alexander Duyck25568a52009-10-27 23:49:59 +00002599 /* start the watchdog. */
2600 hw->mac.get_link_status = 1;
2601 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002602
2603 return 0;
2604
2605err_req_irq:
2606 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002607 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002608 igb_free_all_rx_resources(adapter);
2609err_setup_rx:
2610 igb_free_all_tx_resources(adapter);
2611err_setup_tx:
2612 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002613 if (!resuming)
2614 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002615
2616 return err;
2617}
2618
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002619static int igb_open(struct net_device *netdev)
2620{
2621 return __igb_open(netdev, false);
2622}
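/*
 * A minimal sketch of the runtime-PM contract around __igb_open(),
 * assuming the PM callbacks defined elsewhere in this file:
 *
 *	igb_open(netdev);		resuming == false, so the call
 *					wraps itself in pm_runtime_get_sync()
 *					and pm_runtime_put()
 *	__igb_open(netdev, true);	from a resume path, where the PM
 *					core already holds the device
 *					active, so no reference is taken
 */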
2623
Auke Kok9d5c8242008-01-24 02:22:38 -08002624/**
2625 * igb_close - Disables a network interface
2626 * @netdev: network interface device structure
2627 *
2628 * Returns 0, this is not allowed to fail
2629 *
2630 * The close entry point is called when an interface is de-activated
2631 * by the OS. The hardware is still under the driver's control, but
2632 * needs to be disabled. A global MAC reset is issued to stop the
2633 * hardware, and all transmit and receive resources are freed.
2634 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002635static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08002636{
2637 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002638 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002639
2640 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08002641
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002642 if (!suspending)
2643 pm_runtime_get_sync(&pdev->dev);
2644
2645 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002646 igb_free_irq(adapter);
2647
2648 igb_free_all_tx_resources(adapter);
2649 igb_free_all_rx_resources(adapter);
2650
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002651 if (!suspending)
2652 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002653 return 0;
2654}
2655
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002656static int igb_close(struct net_device *netdev)
2657{
2658 return __igb_close(netdev, false);
2659}
2660
Auke Kok9d5c8242008-01-24 02:22:38 -08002661/**
2662 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002663 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2664 *
2665 * Return 0 on success, negative on failure
2666 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002667int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002668{
Alexander Duyck59d71982010-04-27 13:09:25 +00002669 struct device *dev = tx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002670 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002671 int size;
2672
Alexander Duyck06034642011-08-26 07:44:22 +00002673 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002674 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2675 if (!tx_ring->tx_buffer_info)
2676 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002677 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002678 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002679
2680 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002681 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002682 tx_ring->size = ALIGN(tx_ring->size, 4096);
2683
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002684 set_dev_node(dev, tx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002685 tx_ring->desc = dma_alloc_coherent(dev,
2686 tx_ring->size,
2687 &tx_ring->dma,
2688 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002689 set_dev_node(dev, orig_node);
2690 if (!tx_ring->desc)
2691 tx_ring->desc = dma_alloc_coherent(dev,
2692 tx_ring->size,
2693 &tx_ring->dma,
2694 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002695
2696 if (!tx_ring->desc)
2697 goto err;
2698
Auke Kok9d5c8242008-01-24 02:22:38 -08002699 tx_ring->next_to_use = 0;
2700 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002701
Auke Kok9d5c8242008-01-24 02:22:38 -08002702 return 0;
2703
2704err:
Alexander Duyck06034642011-08-26 07:44:22 +00002705 vfree(tx_ring->tx_buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002706 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002707 "Unable to allocate memory for the transmit descriptor ring\n");
2708 return -ENOMEM;
2709}
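/*
 * The allocation idiom above (preferred NUMA node first, any node as
 * a fallback) generalizes; a minimal standalone sketch with made-up
 * names:
 *
 *	buf = vzalloc_node(size, node);
 *	if (!buf)
 *		buf = vzalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * The dma_alloc_coherent() pair above gets the same effect via
 * set_dev_node(), since there is no node-taking variant to call.
 */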
2710
2711/**
2712 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2713 * (Descriptors) for all queues
2714 * @adapter: board private structure
2715 *
2716 * Return 0 on success, negative on failure
2717 **/
2718static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2719{
Alexander Duyck439705e2009-10-27 23:49:20 +00002720 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002721 int i, err = 0;
2722
2723 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002724 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002725 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002726 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002727 "Allocation for Tx Queue %u failed\n", i);
2728 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002729 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002730 break;
2731 }
2732 }
2733
2734 return err;
2735}
2736
2737/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002738 * igb_setup_tctl - configure the transmit control registers
 2739 * @adapter: board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002740 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002741void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002742{
Auke Kok9d5c8242008-01-24 02:22:38 -08002743 struct e1000_hw *hw = &adapter->hw;
2744 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002745
Alexander Duyck85b430b2009-10-27 15:50:29 +00002746 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2747 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002748
2749 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002750 tctl = rd32(E1000_TCTL);
2751 tctl &= ~E1000_TCTL_CT;
2752 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2753 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2754
2755 igb_config_collision_dist(hw);
2756
Auke Kok9d5c8242008-01-24 02:22:38 -08002757 /* Enable transmits */
2758 tctl |= E1000_TCTL_EN;
2759
2760 wr32(E1000_TCTL, tctl);
2761}
2762
2763/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002764 * igb_configure_tx_ring - Configure transmit ring after Reset
2765 * @adapter: board private structure
2766 * @ring: tx ring to configure
2767 *
2768 * Configure a transmit ring after a reset.
2769 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002770void igb_configure_tx_ring(struct igb_adapter *adapter,
2771 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002772{
2773 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002774 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002775 u64 tdba = ring->dma;
2776 int reg_idx = ring->reg_idx;
2777
2778 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002779 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002780 wrfl();
2781 mdelay(10);
2782
2783 wr32(E1000_TDLEN(reg_idx),
2784 ring->count * sizeof(union e1000_adv_tx_desc));
2785 wr32(E1000_TDBAL(reg_idx),
2786 tdba & 0x00000000ffffffffULL);
2787 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2788
Alexander Duyckfce99e32009-10-27 15:51:27 +00002789 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002790 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002791 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002792
2793 txdctl |= IGB_TX_PTHRESH;
2794 txdctl |= IGB_TX_HTHRESH << 8;
2795 txdctl |= IGB_TX_WTHRESH << 16;
2796
2797 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2798 wr32(E1000_TXDCTL(reg_idx), txdctl);
2799}
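/*
 * Illustrative expansion of the TXDCTL packing above: the prefetch,
 * host, and write-back thresholds sit in byte-aligned fields at bit
 * offsets 0, 8 and 16, so with hypothetical values PTHRESH=8,
 * HTHRESH=1, WTHRESH=16 the queue is enabled with
 *
 *	txdctl = 8 | (1 << 8) | (16 << 16) | E1000_TXDCTL_QUEUE_ENABLE;
 */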
2800
2801/**
2802 * igb_configure_tx - Configure transmit Unit after Reset
2803 * @adapter: board private structure
2804 *
2805 * Configure the Tx unit of the MAC after a reset.
2806 **/
2807static void igb_configure_tx(struct igb_adapter *adapter)
2808{
2809 int i;
2810
2811 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002812 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002813}
2814
2815/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002816 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002817 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2818 *
2819 * Returns 0 on success, negative on failure
2820 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002821int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002822{
Alexander Duyck59d71982010-04-27 13:09:25 +00002823 struct device *dev = rx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002824 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002825 int size, desc_len;
2826
Alexander Duyck06034642011-08-26 07:44:22 +00002827 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002828 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2829 if (!rx_ring->rx_buffer_info)
2830 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002831 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002832 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002833
2834 desc_len = sizeof(union e1000_adv_rx_desc);
2835
2836 /* Round up to nearest 4K */
2837 rx_ring->size = rx_ring->count * desc_len;
2838 rx_ring->size = ALIGN(rx_ring->size, 4096);
2839
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002840 set_dev_node(dev, rx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002841 rx_ring->desc = dma_alloc_coherent(dev,
2842 rx_ring->size,
2843 &rx_ring->dma,
2844 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002845 set_dev_node(dev, orig_node);
2846 if (!rx_ring->desc)
2847 rx_ring->desc = dma_alloc_coherent(dev,
2848 rx_ring->size,
2849 &rx_ring->dma,
2850 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002851
2852 if (!rx_ring->desc)
2853 goto err;
2854
2855 rx_ring->next_to_clean = 0;
2856 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002857
Auke Kok9d5c8242008-01-24 02:22:38 -08002858 return 0;
2859
2860err:
Alexander Duyck06034642011-08-26 07:44:22 +00002861 vfree(rx_ring->rx_buffer_info);
2862 rx_ring->rx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002863	dev_err(dev,
 2864 "Unable to allocate memory for the receive descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002865 return -ENOMEM;
2866}
2867
2868/**
2869 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2870 * (Descriptors) for all queues
2871 * @adapter: board private structure
2872 *
2873 * Return 0 on success, negative on failure
2874 **/
2875static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2876{
Alexander Duyck439705e2009-10-27 23:49:20 +00002877 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002878 int i, err = 0;
2879
2880 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002881 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002882 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002883 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002884 "Allocation for Rx Queue %u failed\n", i);
2885 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002886 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002887 break;
2888 }
2889 }
2890
2891 return err;
2892}
2893
2894/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002895 * igb_setup_mrqc - configure the multiple receive queue control registers
 2896 * @adapter: board private structure
2897 **/
2898static void igb_setup_mrqc(struct igb_adapter *adapter)
2899{
2900 struct e1000_hw *hw = &adapter->hw;
2901 u32 mrqc, rxcsum;
2902 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2903 union e1000_reta {
2904 u32 dword;
2905 u8 bytes[4];
2906 } reta;
2907 static const u8 rsshash[40] = {
2908 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2909 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2910 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2911 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2912
2913 /* Fill out hash function seeds */
2914 for (j = 0; j < 10; j++) {
2915 u32 rsskey = rsshash[(j * 4)];
2916 rsskey |= rsshash[(j * 4) + 1] << 8;
2917 rsskey |= rsshash[(j * 4) + 2] << 16;
2918 rsskey |= rsshash[(j * 4) + 3] << 24;
2919 array_wr32(E1000_RSSRK(0), j, rsskey);
2920 }
2921
Alexander Duycka99955f2009-11-12 18:37:19 +00002922 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002923
2924 if (adapter->vfs_allocated_count) {
 2925 /* 82575 and 82576 support 2 RSS queues for VMDq */
2926 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002927 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002928 case e1000_82580:
2929 num_rx_queues = 1;
2930 shift = 0;
2931 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002932 case e1000_82576:
2933 shift = 3;
2934 num_rx_queues = 2;
2935 break;
2936 case e1000_82575:
2937 shift = 2;
 2938 shift2 = 6;
 break;
2939 default:
2940 break;
2941 }
2942 } else {
2943 if (hw->mac.type == e1000_82575)
2944 shift = 6;
2945 }
2946
2947 for (j = 0; j < (32 * 4); j++) {
2948 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2949 if (shift2)
2950 reta.bytes[j & 3] |= num_rx_queues << shift2;
2951 if ((j & 3) == 3)
2952 wr32(E1000_RETA(j >> 2), reta.dword);
2953 }
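/*
 * Illustrative RETA expansion: with num_rx_queues = 4 and shift = 0
 * (no VFs, not an 82575), the loop above stores j % 4 in successive
 * table bytes, so on a little-endian host the first dword written is
 *
 *	reta.bytes = { 0, 1, 2, 3 }  ->  wr32(E1000_RETA(0), 0x03020100)
 *
 * spreading consecutive hash buckets round-robin across the queues.
 */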
2954
2955 /*
2956 * Disable raw packet checksumming so that RSS hash is placed in
2957 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2958 * offloads as they are enabled by default
2959 */
2960 rxcsum = rd32(E1000_RXCSUM);
2961 rxcsum |= E1000_RXCSUM_PCSD;
2962
2963 if (adapter->hw.mac.type >= e1000_82576)
2964 /* Enable Receive Checksum Offload for SCTP */
2965 rxcsum |= E1000_RXCSUM_CRCOFL;
2966
2967 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2968 wr32(E1000_RXCSUM, rxcsum);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002969 /*
2970 * Generate RSS hash based on TCP port numbers and/or
2971 * IPv4/v6 src and dst addresses since UDP cannot be
2972 * hashed reliably due to IP fragmentation
2973 */
2974
2975 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
2976 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2977 E1000_MRQC_RSS_FIELD_IPV6 |
2978 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2979 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002980
2981 /* If VMDq is enabled then we set the appropriate mode for that, else
2982 * we default to RSS so that an RSS hash is calculated per packet even
2983 * if we are only using one queue */
2984 if (adapter->vfs_allocated_count) {
2985 if (hw->mac.type > e1000_82575) {
2986 /* Set the default pool for the PF's first queue */
2987 u32 vtctl = rd32(E1000_VT_CTL);
2988 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2989 E1000_VT_CTL_DISABLE_DEF_POOL);
2990 vtctl |= adapter->vfs_allocated_count <<
2991 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2992 wr32(E1000_VT_CTL, vtctl);
2993 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002994 if (adapter->rss_queues > 1)
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002995 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002996 else
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002997 mrqc |= E1000_MRQC_ENABLE_VMDQ;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002998 } else {
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002999 if (hw->mac.type != e1000_i211)
3000 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
Alexander Duyck06cf2662009-10-27 15:53:25 +00003001 }
3002 igb_vmm_control(adapter);
3003
Alexander Duyck06cf2662009-10-27 15:53:25 +00003004 wr32(E1000_MRQC, mrqc);
3005}
3006
3007/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003008 * igb_setup_rctl - configure the receive control registers
 3009 * @adapter: board private structure
3010 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003011void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003012{
3013 struct e1000_hw *hw = &adapter->hw;
3014 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08003015
3016 rctl = rd32(E1000_RCTL);
3017
3018 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08003019 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08003020
Alexander Duyck69d728b2008-11-25 01:04:03 -08003021 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00003022 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08003023
Auke Kok87cb7e82008-07-08 15:08:29 -07003024 /*
3025 * enable stripping of CRC. It's unlikely this will break BMC
3026 * redirection as it did with e1000. Newer features require
3027 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003028 */
Auke Kok87cb7e82008-07-08 15:08:29 -07003029 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08003030
Alexander Duyck559e9c42009-10-27 23:52:50 +00003031 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08003032 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08003033
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003034 /* enable LPE to prevent packets larger than max_frame_size */
3035 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08003036
Alexander Duyck952f72a2009-10-27 15:51:07 +00003037 /* disable queue 0 to prevent tail write w/o re-config */
3038 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003039
Alexander Duycke1739522009-02-19 20:39:44 -08003040 /* Attention!!! For SR-IOV PF driver operations you must enable
3041 * queue drop for all VF and PF queues to prevent head of line blocking
 3042 * if an untrusted VF does not provide descriptors to hardware.
3043 */
3044 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08003045 /* set all queue drop enable bits */
3046 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08003047 }
3048
Ben Greear89eaefb2012-03-06 09:41:58 +00003049 /* This is useful for sniffing bad packets. */
3050 if (adapter->netdev->features & NETIF_F_RXALL) {
3051 /* UPE and MPE will be handled by normal PROMISC logic
 3052 * in igb_set_rx_mode */
3053 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3054 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3055 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3056
3057 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3058 E1000_RCTL_DPF | /* Allow filtered pause */
3059 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3060 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3061 * and that breaks VLANs.
3062 */
3063 }
3064
Auke Kok9d5c8242008-01-24 02:22:38 -08003065 wr32(E1000_RCTL, rctl);
3066}
3067
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003068static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3069 int vfn)
3070{
3071 struct e1000_hw *hw = &adapter->hw;
3072 u32 vmolr;
3073
 3074 /* if it isn't the PF, check to see if VFs are enabled and
 3075 * increase the size to support VLAN tags */
3076 if (vfn < adapter->vfs_allocated_count &&
3077 adapter->vf_data[vfn].vlans_enabled)
3078 size += VLAN_TAG_SIZE;
3079
3080 vmolr = rd32(E1000_VMOLR(vfn));
3081 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3082 vmolr |= size | E1000_VMOLR_LPE;
3083 wr32(E1000_VMOLR(vfn), vmolr);
3084
3085 return 0;
3086}
3087
Auke Kok9d5c8242008-01-24 02:22:38 -08003088/**
Alexander Duycke1739522009-02-19 20:39:44 -08003089 * igb_rlpml_set - set maximum receive packet size
3090 * @adapter: board private structure
3091 *
3092 * Configure maximum receivable packet size.
3093 **/
3094static void igb_rlpml_set(struct igb_adapter *adapter)
3095{
Alexander Duyck153285f2011-08-26 07:43:32 +00003096 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003097 struct e1000_hw *hw = &adapter->hw;
3098 u16 pf_id = adapter->vfs_allocated_count;
3099
Alexander Duycke1739522009-02-19 20:39:44 -08003100 if (pf_id) {
3101 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00003102 /*
3103 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3104 * to our max jumbo frame size, in case we need to enable
3105 * jumbo frames on one of the rings later.
3106 * This will not pass over-length frames into the default
3107 * queue because it's gated by the VMOLR.RLPML.
3108 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003109 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003110 }
3111
3112 wr32(E1000_RLPML, max_frame_size);
3113}
3114
Williams, Mitch A8151d292010-02-10 01:44:24 +00003115static inline void igb_set_vmolr(struct igb_adapter *adapter,
3116 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003117{
3118 struct e1000_hw *hw = &adapter->hw;
3119 u32 vmolr;
3120
3121 /*
 3122 * This register exists only on 82576 and newer, so on older
 3123 * MACs we should exit and do nothing
3124 */
3125 if (hw->mac.type < e1000_82576)
3126 return;
3127
3128 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003129 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3130 if (aupe)
3131 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3132 else
3133 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003134
3135 /* clear all bits that might not be set */
3136 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3137
Alexander Duycka99955f2009-11-12 18:37:19 +00003138 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003139 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3140 /*
3141 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3142 * multicast packets
3143 */
3144 if (vfn <= adapter->vfs_allocated_count)
3145 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3146
3147 wr32(E1000_VMOLR(vfn), vmolr);
3148}
3149
Alexander Duycke1739522009-02-19 20:39:44 -08003150/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003151 * igb_configure_rx_ring - Configure a receive ring after Reset
3152 * @adapter: board private structure
3153 * @ring: receive ring to be configured
3154 *
3155 * Configure the Rx unit of the MAC after a reset.
3156 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003157void igb_configure_rx_ring(struct igb_adapter *adapter,
3158 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003159{
3160 struct e1000_hw *hw = &adapter->hw;
3161 u64 rdba = ring->dma;
3162 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003163 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003164
3165 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003166 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003167
3168 /* Set DMA base address registers */
3169 wr32(E1000_RDBAL(reg_idx),
3170 rdba & 0x00000000ffffffffULL);
3171 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3172 wr32(E1000_RDLEN(reg_idx),
3173 ring->count * sizeof(union e1000_adv_rx_desc));
3174
3175 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003176 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003177 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003178 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003179
Alexander Duyck952f72a2009-10-27 15:51:07 +00003180 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003181 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003182#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003183 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003184#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003185 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003186#endif
Alexander Duyck44390ca2011-08-26 07:43:38 +00003187 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Alexander Duyck06218a82011-08-26 07:46:55 +00003188 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00003189 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003190 /* Only set Drop Enable if we are supporting multiple queues */
3191 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3192 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003193
3194 wr32(E1000_SRRCTL(reg_idx), srrctl);
3195
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003196 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003197 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003198
Alexander Duyck85b430b2009-10-27 15:50:29 +00003199 rxdctl |= IGB_RX_PTHRESH;
3200 rxdctl |= IGB_RX_HTHRESH << 8;
3201 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003202
3203 /* enable receive descriptor fetching */
3204 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003205 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3206}
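/*
 * Sketch of the SRRCTL sizing above, assuming 4 KB pages and the
 * usual E1000_SRRCTL_BSIZEPKT_SHIFT of 10 (the field counts 1 KB
 * units): (PAGE_SIZE / 2) >> 10 == 2048 >> 10 == 2, i.e. each
 * descriptor gets a 2 KB packet buffer, while headers are split out
 * into a separate IGB_RX_HDR_LEN buffer.
 */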
3207
3208/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003209 * igb_configure_rx - Configure receive Unit after Reset
3210 * @adapter: board private structure
3211 *
3212 * Configure the Rx unit of the MAC after a reset.
3213 **/
3214static void igb_configure_rx(struct igb_adapter *adapter)
3215{
Hannes Eder91075842009-02-18 19:36:04 -08003216 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003217
Alexander Duyck68d480c2009-10-05 06:33:08 +00003218 /* set UTA to appropriate mode */
3219 igb_set_uta(adapter);
3220
Alexander Duyck26ad9172009-10-05 06:32:49 +00003221 /* set the correct pool for the PF default MAC address in entry 0 */
3222 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3223 adapter->vfs_allocated_count);
3224
Alexander Duyck06cf2662009-10-27 15:53:25 +00003225	/* Set up the HW Rx Head and Tail Descriptor Pointers and
3226 * the Base and Length of the Rx Descriptor Ring */
3227 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003228 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003229}
3230
3231/**
3232 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003233 * @tx_ring: Tx descriptor ring for a specific queue
3234 *
3235 * Free all transmit software resources
3236 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003237void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003238{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003239 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003240
Alexander Duyck06034642011-08-26 07:44:22 +00003241 vfree(tx_ring->tx_buffer_info);
3242 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003243
Alexander Duyck439705e2009-10-27 23:49:20 +00003244 /* if not set, then don't free */
3245 if (!tx_ring->desc)
3246 return;
3247
Alexander Duyck59d71982010-04-27 13:09:25 +00003248 dma_free_coherent(tx_ring->dev, tx_ring->size,
3249 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003250
3251 tx_ring->desc = NULL;
3252}
3253
3254/**
3255 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3256 * @adapter: board private structure
3257 *
3258 * Free all transmit software resources
3259 **/
3260static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3261{
3262 int i;
3263
3264 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003265 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003266}
3267
Alexander Duyckebe42d12011-08-26 07:45:09 +00003268void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3269 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003270{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003271 if (tx_buffer->skb) {
3272 dev_kfree_skb_any(tx_buffer->skb);
3273 if (tx_buffer->dma)
3274 dma_unmap_single(ring->dev,
3275 tx_buffer->dma,
3276 tx_buffer->length,
3277 DMA_TO_DEVICE);
3278 } else if (tx_buffer->dma) {
3279 dma_unmap_page(ring->dev,
3280 tx_buffer->dma,
3281 tx_buffer->length,
3282 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003283 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003284 tx_buffer->next_to_watch = NULL;
3285 tx_buffer->skb = NULL;
3286 tx_buffer->dma = 0;
3287 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003288}
3289
3290/**
3291 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003292 * @tx_ring: ring to be cleaned
3293 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003294static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003295{
Alexander Duyck06034642011-08-26 07:44:22 +00003296 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003297 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003298 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003299
Alexander Duyck06034642011-08-26 07:44:22 +00003300 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003301 return;
3302 /* Free all the Tx ring sk_buffs */
3303
3304 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003305 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003306 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003307 }
3308
John Fastabenddad8a3b2012-04-23 12:22:39 +00003309 netdev_tx_reset_queue(txring_txq(tx_ring));
3310
Alexander Duyck06034642011-08-26 07:44:22 +00003311 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3312 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003313
3314 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003315 memset(tx_ring->desc, 0, tx_ring->size);
3316
3317 tx_ring->next_to_use = 0;
3318 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003319}
3320
3321/**
3322 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3323 * @adapter: board private structure
3324 **/
3325static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3326{
3327 int i;
3328
3329 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003330 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003331}
3332
3333/**
3334 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003335 * @rx_ring: ring to clean the resources from
3336 *
3337 * Free all receive software resources
3338 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003339void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003340{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003341 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003342
Alexander Duyck06034642011-08-26 07:44:22 +00003343 vfree(rx_ring->rx_buffer_info);
3344 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003345
Alexander Duyck439705e2009-10-27 23:49:20 +00003346 /* if not set, then don't free */
3347 if (!rx_ring->desc)
3348 return;
3349
Alexander Duyck59d71982010-04-27 13:09:25 +00003350 dma_free_coherent(rx_ring->dev, rx_ring->size,
3351 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003352
3353 rx_ring->desc = NULL;
3354}
3355
3356/**
3357 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3358 * @adapter: board private structure
3359 *
3360 * Free all receive software resources
3361 **/
3362static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3363{
3364 int i;
3365
3366 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003367 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003368}
3369
3370/**
3371 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003372 * @rx_ring: ring to free buffers from
3373 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003374static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003375{
Auke Kok9d5c8242008-01-24 02:22:38 -08003376 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003377 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003378
Alexander Duyck06034642011-08-26 07:44:22 +00003379 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003380 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003381
Auke Kok9d5c8242008-01-24 02:22:38 -08003382 /* Free all the Rx ring sk_buffs */
3383 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003384 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003385 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003386 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003387 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003388 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003389 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003390 buffer_info->dma = 0;
3391 }
3392
3393 if (buffer_info->skb) {
3394 dev_kfree_skb(buffer_info->skb);
3395 buffer_info->skb = NULL;
3396 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003397 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003398 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003399 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003400 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003401 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003402 buffer_info->page_dma = 0;
3403 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003404 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003405 put_page(buffer_info->page);
3406 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003407 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003408 }
3409 }
3410
Alexander Duyck06034642011-08-26 07:44:22 +00003411 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3412 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003413
3414 /* Zero out the descriptor ring */
3415 memset(rx_ring->desc, 0, rx_ring->size);
3416
3417 rx_ring->next_to_clean = 0;
3418 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003419}
3420
3421/**
3422 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3423 * @adapter: board private structure
3424 **/
3425static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3426{
3427 int i;
3428
3429 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003430 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003431}
3432
3433/**
3434 * igb_set_mac - Change the Ethernet Address of the NIC
3435 * @netdev: network interface device structure
3436 * @p: pointer to an address structure
3437 *
3438 * Returns 0 on success, negative on failure
3439 **/
3440static int igb_set_mac(struct net_device *netdev, void *p)
3441{
3442 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003443 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003444 struct sockaddr *addr = p;
3445
3446 if (!is_valid_ether_addr(addr->sa_data))
3447 return -EADDRNOTAVAIL;
3448
3449 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003450 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003451
Alexander Duyck26ad9172009-10-05 06:32:49 +00003452 /* set the correct pool for the new PF MAC address in entry 0 */
3453 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3454 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003455
Auke Kok9d5c8242008-01-24 02:22:38 -08003456 return 0;
3457}
3458
3459/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003460 * igb_write_mc_addr_list - write multicast addresses to MTA
3461 * @netdev: network interface device structure
3462 *
3463 * Writes multicast address list to the MTA hash table.
3464 * Returns: -ENOMEM on failure
3465 * 0 on no addresses written
3466 * X on writing X addresses to MTA
3467 **/
3468static int igb_write_mc_addr_list(struct net_device *netdev)
3469{
3470 struct igb_adapter *adapter = netdev_priv(netdev);
3471 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003472 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003473 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003474 int i;
3475
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003476 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003477 /* nothing to program, so clear mc list */
3478 igb_update_mc_addr_list(hw, NULL, 0);
3479 igb_restore_vf_multicasts(adapter);
3480 return 0;
3481 }
3482
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003483	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003484 if (!mta_list)
3485 return -ENOMEM;
3486
Alexander Duyck68d480c2009-10-05 06:33:08 +00003487 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003488 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003489 netdev_for_each_mc_addr(ha, netdev)
3490 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003491
Alexander Duyck68d480c2009-10-05 06:33:08 +00003492 igb_update_mc_addr_list(hw, mta_list, i);
3493 kfree(mta_list);
3494
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003495 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003496}
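/*
 * Layout sketch: mta_list is a packed array of ETH_ALEN-byte
 * addresses with no padding, so for three multicast entries the
 * shared code receives
 *
 *	[addr0][addr1][addr2]	(3 * ETH_ALEN == 18 bytes, i == 3)
 */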
3497
3498/**
3499 * igb_write_uc_addr_list - write unicast addresses to RAR table
3500 * @netdev: network interface device structure
3501 *
3502 * Writes unicast address list to the RAR table.
3503 * Returns: -ENOMEM on failure/insufficient address space
3504 * 0 on no addresses written
3505 * X on writing X addresses to the RAR table
3506 **/
3507static int igb_write_uc_addr_list(struct net_device *netdev)
3508{
3509 struct igb_adapter *adapter = netdev_priv(netdev);
3510 struct e1000_hw *hw = &adapter->hw;
3511 unsigned int vfn = adapter->vfs_allocated_count;
3512 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3513 int count = 0;
3514
3515 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003516 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003517 return -ENOMEM;
3518
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003519 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003520 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003521
3522 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003523 if (!rar_entries)
3524 break;
3525 igb_rar_set_qsel(adapter, ha->addr,
3526 rar_entries--,
3527 vfn);
3528 count++;
3529 }
3530 }
3531 /* write the addresses in reverse order to avoid write combining */
3532 for (; rar_entries > 0 ; rar_entries--) {
3533 wr32(E1000_RAH(rar_entries), 0);
3534 wr32(E1000_RAL(rar_entries), 0);
3535 }
3536 wrfl();
3537
3538 return count;
3539}
3540
3541/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003542 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003543 * @netdev: network interface device structure
3544 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003545 * The set_rx_mode entry point is called whenever the unicast or multicast
3546 * address lists or the network interface flags are updated. This routine is
3547 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003548 * promiscuous mode, and all-multi behavior.
3549 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003550static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003551{
3552 struct igb_adapter *adapter = netdev_priv(netdev);
3553 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003554 unsigned int vfn = adapter->vfs_allocated_count;
3555 u32 rctl, vmolr = 0;
3556 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003557
3558 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003559 rctl = rd32(E1000_RCTL);
3560
Alexander Duyck68d480c2009-10-05 06:33:08 +00003561	/* clear the affected bits */
3562 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3563
Patrick McHardy746b9f02008-07-16 20:15:45 -07003564 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003565 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003566 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003567 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003568 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003569 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003570 vmolr |= E1000_VMOLR_MPME;
3571 } else {
3572 /*
 3573 * Write addresses to the MTA; if the attempt fails,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003574	 * we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003575 * that we can at least receive multicast traffic
3576 */
3577 count = igb_write_mc_addr_list(netdev);
3578 if (count < 0) {
3579 rctl |= E1000_RCTL_MPE;
3580 vmolr |= E1000_VMOLR_MPME;
3581 } else if (count) {
3582 vmolr |= E1000_VMOLR_ROMPE;
3583 }
3584 }
3585 /*
 3586 * Write addresses to available RAR registers; if there is not
 3587 * sufficient space to store all the addresses, then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003588 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003589 */
3590 count = igb_write_uc_addr_list(netdev);
3591 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003592 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003593 vmolr |= E1000_VMOLR_ROPE;
3594 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003595 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003596 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003597 wr32(E1000_RCTL, rctl);
3598
Alexander Duyck68d480c2009-10-05 06:33:08 +00003599 /*
3600 * In order to support SR-IOV and eventually VMDq it is necessary to set
3601 * the VMOLR to enable the appropriate modes. Without this workaround
3602 * we will have issues with VLAN tag stripping not being done for frames
3603 * that are only arriving because we are the default pool
3604 */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003605 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003606 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003607
Alexander Duyck68d480c2009-10-05 06:33:08 +00003608 vmolr |= rd32(E1000_VMOLR(vfn)) &
3609 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3610 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003611 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003612}
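/*
 * Summary of the selection above (netdev flags -> filter bits set):
 *
 *	IFF_PROMISC		RCTL UPE+MPE, VMOLR ROPE+MPME
 *	IFF_ALLMULTI		RCTL MPE, VMOLR MPME
 *	MTA write fails		RCTL MPE, VMOLR MPME
 *	RAR write fails		RCTL UPE, VMOLR ROPE
 *	otherwise		exact filtering via the MTA/RAR tables
 */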
3613
Greg Rose13800462010-11-06 02:08:26 +00003614static void igb_check_wvbr(struct igb_adapter *adapter)
3615{
3616 struct e1000_hw *hw = &adapter->hw;
3617 u32 wvbr = 0;
3618
3619 switch (hw->mac.type) {
3620 case e1000_82576:
3621 case e1000_i350:
 3622 wvbr = rd32(E1000_WVBR);
 if (!wvbr)
 3623 return;
3624 break;
3625 default:
3626 break;
3627 }
3628
3629 adapter->wvbr |= wvbr;
3630}
3631
3632#define IGB_STAGGERED_QUEUE_OFFSET 8
3633
3634static void igb_spoof_check(struct igb_adapter *adapter)
3635{
3636 int j;
3637
3638 if (!adapter->wvbr)
3639 return;
3640
 3641 for (j = 0; j < adapter->vfs_allocated_count; j++) {
3642 if (adapter->wvbr & (1 << j) ||
3643 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3644 dev_warn(&adapter->pdev->dev,
3645 "Spoof event(s) detected on VF %d\n", j);
3646 adapter->wvbr &=
3647 ~((1 << j) |
3648 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3649 }
3650 }
3651}
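/*
 * Illustrative wvbr decode for the loop above: each VF owns two bits,
 * its own index and the index staggered by 8, so for VF 2 the test
 * and the clear both use the mask
 *
 *	(1 << 2) | (1 << (2 + IGB_STAGGERED_QUEUE_OFFSET)) == 0x404
 */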
3652
Auke Kok9d5c8242008-01-24 02:22:38 -08003653/* Need to wait a few seconds after link up to get diagnostic
 3654 * information from the phy */
3655static void igb_update_phy_info(unsigned long data)
3656{
3657 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003658 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003659}
3660
3661/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003662 * igb_has_link - check shared code for link and determine up/down
3663 * @adapter: pointer to driver private info
3664 **/
Nick Nunley31455352010-02-17 01:01:21 +00003665bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003666{
3667 struct e1000_hw *hw = &adapter->hw;
3668 bool link_active = false;
3669 s32 ret_val = 0;
3670
3671 /* get_link_status is set on LSC (link status) interrupt or
 3672 * rx sequence error interrupt. link_active will stay
 3673 * false until e1000_check_for_link establishes link
3674 * for copper adapters ONLY
3675 */
3676 switch (hw->phy.media_type) {
3677 case e1000_media_type_copper:
3678 if (hw->mac.get_link_status) {
3679 ret_val = hw->mac.ops.check_for_link(hw);
3680 link_active = !hw->mac.get_link_status;
3681 } else {
3682 link_active = true;
3683 }
3684 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003685 case e1000_media_type_internal_serdes:
3686 ret_val = hw->mac.ops.check_for_link(hw);
3687 link_active = hw->mac.serdes_has_link;
3688 break;
3689 default:
3690 case e1000_media_type_unknown:
3691 break;
3692 }
3693
3694 return link_active;
3695}
3696
Stefan Assmann563988d2011-04-05 04:27:15 +00003697static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3698{
3699 bool ret = false;
3700 u32 ctrl_ext, thstat;
3701
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00003702 /* check for thermal sensor event on i350 copper only */
Stefan Assmann563988d2011-04-05 04:27:15 +00003703 if (hw->mac.type == e1000_i350) {
3704 thstat = rd32(E1000_THSTAT);
3705 ctrl_ext = rd32(E1000_CTRL_EXT);
3706
3707 if ((hw->phy.media_type == e1000_media_type_copper) &&
3708 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3709 ret = !!(thstat & event);
3710 }
3711 }
3712
3713 return ret;
3714}
3715
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003716/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003717 * igb_watchdog - Timer callback
3718 * @data: pointer to adapter cast into an unsigned long
3719 **/
3720static void igb_watchdog(unsigned long data)
3721{
3722 struct igb_adapter *adapter = (struct igb_adapter *)data;
3723 /* Do the rest outside of interrupt context */
3724 schedule_work(&adapter->watchdog_task);
3725}
3726
3727static void igb_watchdog_task(struct work_struct *work)
3728{
3729 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003730 struct igb_adapter,
3731 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003732 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003733 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003734 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003735 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003736
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003737 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003738 if (link) {
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003739 /* Cancel scheduled suspend requests. */
3740 pm_runtime_resume(netdev->dev.parent);
3741
Auke Kok9d5c8242008-01-24 02:22:38 -08003742 if (!netif_carrier_ok(netdev)) {
3743 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003744 hw->mac.ops.get_speed_and_duplex(hw,
3745 &adapter->link_speed,
3746 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003747
3748 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003749			/* Link status message must follow this format */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003750 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3751 "Duplex, Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003752 netdev->name,
3753 adapter->link_speed,
3754 adapter->link_duplex == FULL_DUPLEX ?
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003755 "Full" : "Half",
3756 (ctrl & E1000_CTRL_TFCE) &&
3757 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3758 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3759 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
Auke Kok9d5c8242008-01-24 02:22:38 -08003760
Stefan Assmann563988d2011-04-05 04:27:15 +00003761 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003762 if (igb_thermal_sensor_event(hw,
3763 E1000_THSTAT_LINK_THROTTLE)) {
 3764 netdev_info(netdev,
 3765 "The network adapter link speed was downshifted because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003767 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003768
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003769 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003770 adapter->tx_timeout_factor = 1;
3771 switch (adapter->link_speed) {
3772 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003773 adapter->tx_timeout_factor = 14;
3774 break;
3775 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003776 /* maybe add some timeout factor ? */
3777 break;
3778 }
3779
3780 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003781
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003782 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003783 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003784
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003785 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003786 if (!test_bit(__IGB_DOWN, &adapter->state))
3787 mod_timer(&adapter->phy_info_timer,
3788 round_jiffies(jiffies + 2 * HZ));
3789 }
3790 } else {
3791 if (netif_carrier_ok(netdev)) {
3792 adapter->link_speed = 0;
3793 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003794
3795 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003796 if (igb_thermal_sensor_event(hw,
3797 E1000_THSTAT_PWR_DOWN)) {
 3798 netdev_err(netdev,
 3799 "The network adapter was stopped because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003800 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003801
Alexander Duyck527d47c2008-11-27 00:21:39 -08003802			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 *       receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

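/*
 * Worked example for the sizing heuristic above (illustrative numbers
 * only, not measured traffic): assume the last interval saw 50 Rx
 * packets totalling 60000 bytes and no Tx work.
 *
 *	avg_wire_size = 60000 / 50 = 1200;
 *	avg_wire_size += 24;		becomes 1224
 *	1224 falls outside the (300, 1200) mid-size window, so:
 *	new_val = 1224 / 2 = 612;
 *
 * The result is then raised to IGB_20K_ITR only if it falls below that
 * floor while the ring is in conservative mode (itr setting 3).
 */
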
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE: These calculations are only valid when operating in a single-
 *            queue environment.
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				itrval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				itrval = bulk_latency;
			} else if ((packets > 35)) {
				itrval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

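/*
 * Illustrative walk through the state machine above (made-up counts):
 * a ring container sitting in low_latency that saw 12000 bytes in 8
 * packets has bytes > 10000 and packets < 10, so it is demoted to
 * bulk_latency; if the next interrupt then sees only 1200 bytes
 * (bytes < 1500), it is promoted back to low_latency.
 */
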
static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) :
			  new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

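/*
 * Note the two-stage update: igb_set_itr() only records the new value
 * and raises q_vector->set_itr.  The EITR register write itself is
 * deferred to igb_write_itr(), invoked at the top of the next
 * interrupt, so the adapter's running interrupt timer is never
 * restarted mid-interval.
 */
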
static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

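/*
 * Worked example of the header math in igb_tso() (hypothetical frame):
 * an IPv4 TCP skb with no VLAN, a 20-byte IP header, a 32-byte TCP
 * header (timestamps) and gso_size = 1448 carrying 4344 bytes of data:
 *
 *	l4len     = 32;
 *	*hdr_len  = 14 + 20 + 32 = 66;
 *	gso_segs  = 3;
 *	bytecount = 4410 + (3 - 1) * 66 = 4542;	(3 wire frames * 1514)
 *	mss_l4len_idx = (32 << E1000_ADVTXD_L4LEN_SHIFT) |
 *			(1448 << E1000_ADVTXD_MSS_SHIFT);
 */
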
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}

static __le32 igb_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
				      E1000_ADVTXD_DCMD_IFCS |
				      E1000_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);

	/* set timestamp bit if present */
	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);

	/* set segmentation bits for TSO */
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring if any offload is enabled */
	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert IPv4 checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	}

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

/*
 * The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR	15
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

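/*
 * Example of the split this limit forces (hypothetical lengths): with
 * IGB_MAX_DATA_PER_TXD = 1 << 15 = 32768, a 40000-byte linear area is
 * emitted as one 32768-byte descriptor by the inner while loop in
 * igb_tx_map() below, followed by a descriptor for the remaining
 * 7232 bytes.
 */
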
static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer_info;
	union e1000_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	__le32 cmd_type;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	first->length = size;
	first->dma = dma;
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.olinfo_status = 0;
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = size;
		tx_buffer_info->dma = dma;

		tx_desc->read.olinfo_status = 0;
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		frag++;
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
	if (unlikely(skb->no_fcs))
		cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time; it synchronizes IO on IA64/Altix systems */
	mmiowb();

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

4419 /* We need to check again in a case another CPU has just
4420 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

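/*
 * The stop/recheck dance in __igb_maybe_stop_tx() closes a race with
 * the Tx completion path: a completion on another CPU may free
 * descriptors between the igb_desc_unused() check and
 * netif_stop_subqueue().  Without the smp_mb() and the second check,
 * that completion could observe the queue still running, skip the
 * wake-up, and leave the queue stopped with room available.
 */
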
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	igb_tx_map(tx_ring, first, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;

out_drop:
	igb_unmap_and_free_tx_resource(tx_ring, first);

	return NETDEV_TX_OK;
}

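/*
 * Descriptor budget example for the checks above (hypothetical skb):
 * a TSO packet with 3 page fragments reserves nr_frags + 4 = 7
 * descriptors up front (3 frags + 1 for skb->data + 1 context + 2
 * gap), even though the map loop may consume more when a buffer
 * crosses the 32K IGB_MAX_DATA_PER_TXD limit.
 */
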
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

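/*
 * max_frame arithmetic for the checks above: a standard 1500-byte MTU
 * gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
 * bytes, and the 9238-byte MAX_STD_JUMBO_FRAME_SIZE likewise
 * corresponds to the advertised 9216-byte jumbo MTU plus the same
 * 22 bytes of overhead.
 */
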
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also an indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
4842 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004843 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004844
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004845 /* Check for a mailbox event */
4846 if (icr & E1000_ICR_VMMB)
4847 igb_msg_task(adapter);
4848
4849 if (icr & E1000_ICR_LSC) {
4850 hw->mac.get_link_status = 1;
4851 /* guard against interrupt when we're going down */
4852 if (!test_bit(__IGB_DOWN, &adapter->state))
4853 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4854 }
4855
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004856 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004857
4858 return IRQ_HANDLED;
4859}
4860
Alexander Duyck047e0032009-10-27 15:49:27 +00004861static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004862{
Alexander Duyck26b39272010-02-17 01:00:41 +00004863 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004864 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004865
Alexander Duyck047e0032009-10-27 15:49:27 +00004866 if (!q_vector->set_itr)
4867 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004868
Alexander Duyck047e0032009-10-27 15:49:27 +00004869 if (!itr_val)
4870 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004871
Alexander Duyck26b39272010-02-17 01:00:41 +00004872 if (adapter->hw.mac.type == e1000_82575)
4873 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004874 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004875 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004876
4877 writel(itr_val, q_vector->itr_register);
4878 q_vector->set_itr = 0;
4879}
4880
4881static irqreturn_t igb_msix_ring(int irq, void *data)
4882{
4883 struct igb_q_vector *q_vector = data;
4884
4885 /* Write the ITR value calculated from the previous interrupt. */
4886 igb_write_itr(q_vector);
4887
4888 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004889
Auke Kok9d5c8242008-01-24 02:22:38 -08004890 return IRQ_HANDLED;
4891}
4892
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004893#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004894static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004895{
Alexander Duyck047e0032009-10-27 15:49:27 +00004896 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004897 struct e1000_hw *hw = &adapter->hw;
4898 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004899
Alexander Duyck047e0032009-10-27 15:49:27 +00004900 if (q_vector->cpu == cpu)
4901 goto out_no_update;
4902
Alexander Duyck0ba82992011-08-26 07:45:47 +00004903 if (q_vector->tx.ring) {
4904 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004905 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4906 if (hw->mac.type == e1000_82575) {
4907 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4908 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4909 } else {
4910 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4911 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4912 E1000_DCA_TXCTRL_CPUID_SHIFT;
4913 }
4914 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4915 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4916 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004917 if (q_vector->rx.ring) {
4918 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004919 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4920 if (hw->mac.type == e1000_82575) {
4921 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4922 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4923 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004924 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004925 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004926 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004927 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004928 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4929 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4930 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4931 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004932 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004933 q_vector->cpu = cpu;
4934out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004935 put_cpu();
4936}
4937
4938static void igb_setup_dca(struct igb_adapter *adapter)
4939{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004940 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004941 int i;
4942
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004943 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004944 return;
4945
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004946 /* Always use CB2 mode, difference is masked in the CB driver. */
4947 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4948
Alexander Duyck047e0032009-10-27 15:49:27 +00004949 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004950 adapter->q_vector[i]->cpu = -1;
4951 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004952 }
4953}
4954
4955static int __igb_notify_dca(struct device *dev, void *data)
4956{
4957 struct net_device *netdev = dev_get_drvdata(dev);
4958 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004959 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004960 struct e1000_hw *hw = &adapter->hw;
4961 unsigned long event = *(unsigned long *)data;
4962
4963 switch (event) {
4964 case DCA_PROVIDER_ADD:
4965 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004966 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004967 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004968 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004969 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004970 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004971 igb_setup_dca(adapter);
4972 break;
4973 }
4974 /* Fall Through since DCA is disabled. */
4975 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004976 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004977 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004978 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004979 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004980 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004981 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004982 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004983 }
4984 break;
4985 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004986
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004987 return 0;
4988}
4989
4990static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4991 void *p)
4992{
4993 int ret_val;
4994
4995 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4996 __igb_notify_dca);
4997
4998 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4999}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005000#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08005001
Greg Rose0224d662011-10-14 02:57:14 +00005002#ifdef CONFIG_PCI_IOV
5003static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5004{
5005 unsigned char mac_addr[ETH_ALEN];
5006 struct pci_dev *pdev = adapter->pdev;
5007 struct e1000_hw *hw = &adapter->hw;
5008 struct pci_dev *pvfdev;
5009 unsigned int device_id;
5010 u16 thisvf_devfn;
5011
Joe Perches7efd26d2012-07-12 19:33:06 +00005012 eth_random_addr(mac_addr);
Greg Rose0224d662011-10-14 02:57:14 +00005013 igb_set_vf_mac(adapter, vf, mac_addr);
5014
5015 switch (adapter->hw.mac.type) {
5016 case e1000_82576:
5017 device_id = IGB_82576_VF_DEV_ID;
5018 /* VF Stride for 82576 is 2 */
5019 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
5020 (pdev->devfn & 1);
5021 break;
5022 case e1000_i350:
5023 device_id = IGB_I350_VF_DEV_ID;
5024 /* VF Stride for I350 is 4 */
5025 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
5026 (pdev->devfn & 3);
5027 break;
5028 default:
5029 device_id = 0;
5030 thisvf_devfn = 0;
5031 break;
5032 }
5033
5034 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5035 while (pvfdev) {
5036 if (pvfdev->devfn == thisvf_devfn)
5037 break;
5038 pvfdev = pci_get_device(hw->vendor_id,
5039 device_id, pvfdev);
5040 }
5041
5042 if (pvfdev)
5043 adapter->vf_data[vf].vfdev = pvfdev;
5044 else
5045 dev_err(&pdev->dev,
5046 "Couldn't find pci dev ptr for VF %4.4x\n",
5047 thisvf_devfn);
5048 return pvfdev != NULL;
5049}
5050
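/**
 * igb_find_enabled_vfs - count VFs already enabled behind this PF
 * @adapter: board private structure
 *
 * Walks the PCI device list at each expected VF devfn (PF devfn plus
 * 0x80, stepping by the per-device VF stride) and returns the number
 * of VF devices found.
 **/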
5051static int igb_find_enabled_vfs(struct igb_adapter *adapter)
5052{
5053 struct e1000_hw *hw = &adapter->hw;
5054 struct pci_dev *pdev = adapter->pdev;
5055 struct pci_dev *pvfdev;
5056 u16 vf_devfn = 0;
5057 u16 vf_stride;
5058 unsigned int device_id;
5059 int vfs_found = 0;
5060
5061 switch (adapter->hw.mac.type) {
5062 case e1000_82576:
5063 device_id = IGB_82576_VF_DEV_ID;
5064 /* VF Stride for 82576 is 2 */
5065 vf_stride = 2;
5066 break;
5067 case e1000_i350:
5068 device_id = IGB_I350_VF_DEV_ID;
5069 /* VF Stride for I350 is 4 */
5070 vf_stride = 4;
5071 break;
5072 default:
5073 device_id = 0;
5074 vf_stride = 0;
5075 break;
5076 }
5077
5078 vf_devfn = pdev->devfn + 0x80;
5079 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5080 while (pvfdev) {
Greg Rose06292922012-02-02 23:51:43 +00005081 if (pvfdev->devfn == vf_devfn &&
5082 (pvfdev->bus->number >= pdev->bus->number))
Greg Rose0224d662011-10-14 02:57:14 +00005083 vfs_found++;
5084 vf_devfn += vf_stride;
5085 pvfdev = pci_get_device(hw->vendor_id,
5086 device_id, pvfdev);
5087 }
5088
5089 return vfs_found;
5090}
5091
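/**
 * igb_check_vf_assignment - report whether any VF is assigned to a guest
 * @adapter: board private structure
 *
 * Returns true if any cached VF pci_dev has PCI_DEV_FLAGS_ASSIGNED set.
 **/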
5092static int igb_check_vf_assignment(struct igb_adapter *adapter)
5093{
5094 int i;
5095 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5096 if (adapter->vf_data[i].vfdev) {
5097 if (adapter->vf_data[i].vfdev->dev_flags &
5098 PCI_DEV_FLAGS_ASSIGNED)
5099 return true;
5100 }
5101 }
5102 return false;
5103}
5104
5105#endif
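/**
 * igb_ping_all_vfs - post a control message to every allocated VF
 * @adapter: board private structure
 *
 * Writes E1000_PF_CONTROL_MSG to each VF mailbox, adding the CTS bit
 * for VFs that have completed the reset handshake.
 **/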
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005106static void igb_ping_all_vfs(struct igb_adapter *adapter)
5107{
5108 struct e1000_hw *hw = &adapter->hw;
5109 u32 ping;
5110 int i;
5111
5112 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5113 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005114 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005115 ping |= E1000_VT_MSGTYPE_CTS;
5116 igb_write_mbx(hw, &ping, 1, i);
5117 }
5118}
5119
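/**
 * igb_set_vf_promisc - handle a VF request to change promiscuous mode
 * @adapter: board private structure
 * @msgbuf: mailbox message from the VF
 * @vf: VF index
 *
 * Only multicast promiscuous mode is supported; if flags remain in the
 * message after processing, -EINVAL is returned so the VF is NACKed.
 **/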
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005120static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5121{
5122 struct e1000_hw *hw = &adapter->hw;
5123 u32 vmolr = rd32(E1000_VMOLR(vf));
5124 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5125
Alexander Duyckd85b90042010-09-22 17:56:20 +00005126 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005127 IGB_VF_FLAG_MULTI_PROMISC);
5128 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5129
5130 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5131 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00005132 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005133 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5134 } else {
5135 /*
5136 * if we have hashes and we are clearing a multicast promisc
5137 * flag we need to write the hashes to the MTA as this step
5138 * was previously skipped
5139 */
5140 if (vf_data->num_vf_mc_hashes > 30) {
5141 vmolr |= E1000_VMOLR_MPME;
5142 } else if (vf_data->num_vf_mc_hashes) {
5143 int j;
5144 vmolr |= E1000_VMOLR_ROMPE;
5145 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5146 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5147 }
5148 }
5149
5150 wr32(E1000_VMOLR(vf), vmolr);
5151
5152 /* there are flags left unprocessed, likely not supported */
5153 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5154 return -EINVAL;
5155
5156 return 0;
5158}
5159
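/**
 * igb_set_vf_multicasts - store a VF's multicast hash list
 * @adapter: board private structure
 * @msgbuf: mailbox message holding the hash values
 * @vf: VF index
 *
 * Saves up to 30 hash values for later restoration and then rewrites
 * the MTA via igb_set_rx_mode().
 **/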
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005160static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5161 u32 *msgbuf, u32 vf)
5162{
5163 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5164 u16 *hash_list = (u16 *)&msgbuf[1];
5165 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5166 int i;
5167
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005168 /* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
5170 * list changes
5171 */
5172 vf_data->num_vf_mc_hashes = n;
5173
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005174 /* only up to 30 hash values supported */
5175 if (n > 30)
5176 n = 30;
5177
5178 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005179 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005180 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005181
5182 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005183 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005184
5185 return 0;
5186}
5187
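/**
 * igb_restore_vf_multicasts - replay stored VF multicast state
 * @adapter: board private structure
 *
 * Re-enables multicast promiscuous mode or rewrites the saved hashes
 * to the MTA for each VF after the filter tables have been reset.
 **/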
5188static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5189{
5190 struct e1000_hw *hw = &adapter->hw;
5191 struct vf_data_storage *vf_data;
5192 int i, j;
5193
5194 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005195 u32 vmolr = rd32(E1000_VMOLR(i));
5196 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5197
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005198 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005199
5200 if ((vf_data->num_vf_mc_hashes > 30) ||
5201 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5202 vmolr |= E1000_VMOLR_MPME;
5203 } else if (vf_data->num_vf_mc_hashes) {
5204 vmolr |= E1000_VMOLR_ROMPE;
5205 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5206 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5207 }
5208 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005209 }
5210}
5211
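/**
 * igb_clear_vf_vfta - remove a VF from every VLAN pool it belongs to
 * @adapter: board private structure
 * @vf: VF index
 *
 * Clears the VF's pool bit in each VLVF entry and drops the VLAN from
 * the VFTA once its pool becomes empty.
 **/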
5212static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5213{
5214 struct e1000_hw *hw = &adapter->hw;
5215 u32 pool_mask, reg, vid;
5216 int i;
5217
5218 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5219
5220 /* Find the vlan filter for this id */
5221 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5222 reg = rd32(E1000_VLVF(i));
5223
5224 /* remove the vf from the pool */
5225 reg &= ~pool_mask;
5226
5227 /* if pool is empty then remove entry from vfta */
5228 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5229 (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
5232 igb_vfta_set(hw, vid, false);
5233 }
5234
5235 wr32(E1000_VLVF(i), reg);
5236 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005237
5238 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005239}
5240
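/**
 * igb_vlvf_set - add or remove a pool from the filter for a VLAN ID
 * @adapter: board private structure
 * @vid: VLAN ID to update
 * @add: true to join the VLAN, false to leave it
 * @vf: pool/VF index
 *
 * Also grows the VF's RLPML by 4 bytes when its first VLAN is added
 * and shrinks it again when the last one is removed.
 **/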
5241static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5242{
5243 struct e1000_hw *hw = &adapter->hw;
5244 u32 reg, i;
5245
Alexander Duyck51466232009-10-27 23:47:35 +00005246 /* The vlvf table only exists on 82576 hardware and newer */
5247 if (hw->mac.type < e1000_82576)
5248 return -1;
5249
5250 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005251 if (!adapter->vfs_allocated_count)
5252 return -1;
5253
5254 /* Find the vlan filter for this id */
5255 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5256 reg = rd32(E1000_VLVF(i));
5257 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5258 vid == (reg & E1000_VLVF_VLANID_MASK))
5259 break;
5260 }
5261
5262 if (add) {
5263 if (i == E1000_VLVF_ARRAY_SIZE) {
5264 /* Did not find a matching VLAN ID entry that was
5265 * enabled. Search for a free filter entry, i.e.
5266 * one without the enable bit set
5267 */
5268 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5269 reg = rd32(E1000_VLVF(i));
5270 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5271 break;
5272 }
5273 }
5274 if (i < E1000_VLVF_ARRAY_SIZE) {
5275 /* Found an enabled/available entry */
5276 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5277
5278 /* if !enabled we need to set this up in vfta */
5279 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005280 /* add VID to filter table */
5281 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005282 reg |= E1000_VLVF_VLANID_ENABLE;
5283 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005284 reg &= ~E1000_VLVF_VLANID_MASK;
5285 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005286 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005287
5288 /* do not modify RLPML for PF devices */
5289 if (vf >= adapter->vfs_allocated_count)
5290 return 0;
5291
5292 if (!adapter->vf_data[vf].vlans_enabled) {
5293 u32 size;
5294 reg = rd32(E1000_VMOLR(vf));
5295 size = reg & E1000_VMOLR_RLPML_MASK;
5296 size += 4;
5297 reg &= ~E1000_VMOLR_RLPML_MASK;
5298 reg |= size;
5299 wr32(E1000_VMOLR(vf), reg);
5300 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005301
Alexander Duyck51466232009-10-27 23:47:35 +00005302 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005303 }
5304 } else {
5305 if (i < E1000_VLVF_ARRAY_SIZE) {
5306 /* remove vf from the pool */
5307 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5308 /* if pool is empty then remove entry from vfta */
5309 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5310 reg = 0;
5311 igb_vfta_set(hw, vid, false);
5312 }
5313 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005314
5315 /* do not modify RLPML for PF devices */
5316 if (vf >= adapter->vfs_allocated_count)
5317 return 0;
5318
5319 adapter->vf_data[vf].vlans_enabled--;
5320 if (!adapter->vf_data[vf].vlans_enabled) {
5321 u32 size;
5322 reg = rd32(E1000_VMOLR(vf));
5323 size = reg & E1000_VMOLR_RLPML_MASK;
5324 size -= 4;
5325 reg &= ~E1000_VMOLR_RLPML_MASK;
5326 reg |= size;
5327 wr32(E1000_VMOLR(vf), reg);
5328 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005329 }
5330 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005331 return 0;
5332}
5333
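/**
 * igb_set_vmvir - program default VLAN tag insertion for a VF
 * @adapter: board private structure
 * @vid: VLAN ID to insert on transmit, or 0 to disable insertion
 * @vf: VF index
 **/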
5334static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5335{
5336 struct e1000_hw *hw = &adapter->hw;
5337
5338 if (vid)
5339 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5340 else
5341 wr32(E1000_VMVIR(vf), 0);
5342}
5343
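/**
 * igb_ndo_set_vf_vlan - administratively set a port VLAN for a VF
 * @netdev: network interface device structure
 * @vf: VF index
 * @vlan: VLAN ID to enforce, or 0 (with @qos 0) to clear it
 * @qos: priority bits to insert in the tag
 **/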
5344static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5345 int vf, u16 vlan, u8 qos)
5346{
5347 int err = 0;
5348 struct igb_adapter *adapter = netdev_priv(netdev);
5349
5350 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5351 return -EINVAL;
5352 if (vlan || qos) {
5353 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5354 if (err)
5355 goto out;
5356 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5357 igb_set_vmolr(adapter, vf, !vlan);
5358 adapter->vf_data[vf].pf_vlan = vlan;
5359 adapter->vf_data[vf].pf_qos = qos;
5360 dev_info(&adapter->pdev->dev,
5361 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5362 if (test_bit(__IGB_DOWN, &adapter->state)) {
5363 dev_warn(&adapter->pdev->dev,
5364 "The VF VLAN has been set,"
5365 " but the PF device is not up.\n");
5366 dev_warn(&adapter->pdev->dev,
5367 "Bring the PF device up before"
5368 " attempting to use the VF device.\n");
5369 }
5370 } else {
5371 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5372 false, vf);
5373 igb_set_vmvir(adapter, vlan, vf);
5374 igb_set_vmolr(adapter, vf, true);
5375 adapter->vf_data[vf].pf_vlan = 0;
5376 adapter->vf_data[vf].pf_qos = 0;
5377 }
5378out:
5379 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005380}
5381
5382static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5383{
5384 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5385 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5386
5387 return igb_vlvf_set(adapter, vid, add, vf);
5388}
5389
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005390static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005391{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005392 /* clear flags - except flag that indicates PF has set the MAC */
5393 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005394 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005395
5396 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005397 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005398
5399 /* reset vlans for device */
5400 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005401 if (adapter->vf_data[vf].pf_vlan)
5402 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5403 adapter->vf_data[vf].pf_vlan,
5404 adapter->vf_data[vf].pf_qos);
5405 else
5406 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005407
5408 /* reset multicast table array for vf */
5409 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5410
5411 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005412 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005413}
5414
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005415static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5416{
5417 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5418
5419 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005420 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
Joe Perches7efd26d2012-07-12 19:33:06 +00005421 eth_random_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005422
5423 /* process remaining reset events */
5424 igb_vf_reset(adapter, vf);
5425}
5426
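/**
 * igb_vf_reset_msg - respond to a VF reset request
 * @adapter: board private structure
 * @vf: VF index
 *
 * Performs the common reset work, reprograms the VF's MAC filter,
 * enables its transmit and receive paths, and replies with an ACK
 * message carrying the MAC address.
 **/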
5427static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005428{
5429 struct e1000_hw *hw = &adapter->hw;
5430 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005431 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005432 u32 reg, msgbuf[3];
5433 u8 *addr = (u8 *)(&msgbuf[1]);
5434
5435 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005436 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005437
5438 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005439 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005440
5441 /* enable transmit and receive for vf */
5442 reg = rd32(E1000_VFTE);
5443 wr32(E1000_VFTE, reg | (1 << vf));
5444 reg = rd32(E1000_VFRE);
5445 wr32(E1000_VFRE, reg | (1 << vf));
5446
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005447 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005448
5449 /* reply to reset with ack and vf mac address */
5450 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5451 memcpy(addr, vf_mac, 6);
5452 igb_write_mbx(hw, msgbuf, 3, vf);
5453}
5454
5455static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5456{
Greg Rosede42edd2010-07-01 13:39:23 +00005457 /*
5458 * The VF MAC Address is stored in a packed array of bytes
5459 * starting at the second 32 bit word of the msg array
5460 */
	unsigned char *addr = (unsigned char *)&msg[1];
5462 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005463
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005464 if (is_valid_ether_addr(addr))
5465 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005466
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005467 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005468}
5469
5470static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5471{
5472 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005473 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005474 u32 msg = E1000_VT_MSGTYPE_NACK;
5475
5476 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005477 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5478 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005479 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005480 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005481 }
5482}
5483
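/**
 * igb_rcv_msg_from_vf - dispatch one mailbox message from a VF
 * @adapter: board private structure
 * @vf: VF index
 *
 * Reads the message, enforces the reset-before-configure handshake,
 * routes it to the matching handler, and ACKs or NACKs the result.
 **/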
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005484static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005485{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005486 struct pci_dev *pdev = adapter->pdev;
5487 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005488 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005489 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005490 s32 retval;
5491
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005492 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005493
Alexander Duyckfef45f42009-12-11 22:57:34 -08005494 if (retval) {
5495 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005496 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005497 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5498 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5499 return;
5500 goto out;
5501 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005502
5503 /* this is a message we already processed, do nothing */
5504 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005505 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005506
5507 /*
5508 * until the vf completes a reset it should not be
5509 * allowed to start any configuration.
5510 */
5511
5512 if (msgbuf[0] == E1000_VF_RESET) {
5513 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005514 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005515 }
5516
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005517 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005518 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5519 return;
5520 retval = -1;
5521 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005522 }
5523
5524 switch ((msgbuf[0] & 0xFFFF)) {
5525 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005526 retval = -EINVAL;
5527 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5528 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5529 else
5530 dev_warn(&pdev->dev,
5531 "VF %d attempted to override administratively "
5532 "set MAC address\nReload the VF driver to "
5533 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005534 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005535 case E1000_VF_SET_PROMISC:
5536 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5537 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005538 case E1000_VF_SET_MULTICAST:
5539 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5540 break;
5541 case E1000_VF_SET_LPE:
5542 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5543 break;
5544 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005545 retval = -1;
5546 if (vf_data->pf_vlan)
5547 dev_warn(&pdev->dev,
5548 "VF %d attempted to override administratively "
5549 "set VLAN tag\nReload the VF driver to "
5550 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005551 else
5552 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005553 break;
5554 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005555 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005556 retval = -1;
5557 break;
5558 }
5559
Alexander Duyckfef45f42009-12-11 22:57:34 -08005560 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5561out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005562 /* notify the VF of the results of what it sent us */
5563 if (retval)
5564 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5565 else
5566 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5567
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005568 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005569}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005570
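/**
 * igb_msg_task - service pending mailbox events for all VFs
 * @adapter: board private structure
 *
 * Checks every allocated VF for reset requests, pending messages,
 * and acks, and handles each in turn.
 **/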
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005571static void igb_msg_task(struct igb_adapter *adapter)
5572{
5573 struct e1000_hw *hw = &adapter->hw;
5574 u32 vf;
5575
5576 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5577 /* process any reset requests */
5578 if (!igb_check_for_rst(hw, vf))
5579 igb_vf_reset_event(adapter, vf);
5580
5581 /* process any messages pending */
5582 if (!igb_check_for_msg(hw, vf))
5583 igb_rcv_msg_from_vf(adapter, vf);
5584
5585 /* process any acks */
5586 if (!igb_check_for_ack(hw, vf))
5587 igb_rcv_ack_from_vf(adapter, vf);
5588 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005589}
5590
Auke Kok9d5c8242008-01-24 02:22:38 -08005591/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005592 * igb_set_uta - Set unicast filter table address
5593 * @adapter: board private structure
5594 *
5595 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005598 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5599 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005600 **/
5601static void igb_set_uta(struct igb_adapter *adapter)
5602{
5603 struct e1000_hw *hw = &adapter->hw;
5604 int i;
5605
5606 /* The UTA table only exists on 82576 hardware and newer */
5607 if (hw->mac.type < e1000_82576)
5608 return;
5609
5610 /* we only need to do this if VMDq is enabled */
5611 if (!adapter->vfs_allocated_count)
5612 return;
5613
5614 for (i = 0; i < hw->mac.uta_reg_count; i++)
5615 array_wr32(E1000_UTA, i, ~0);
5616}
5617
5618/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005619 * igb_intr_msi - Interrupt Handler
5620 * @irq: interrupt number
5621 * @data: pointer to a network interface device structure
5622 **/
5623static irqreturn_t igb_intr_msi(int irq, void *data)
5624{
Alexander Duyck047e0032009-10-27 15:49:27 +00005625 struct igb_adapter *adapter = data;
5626 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005627 struct e1000_hw *hw = &adapter->hw;
5628 /* read ICR disables interrupts using IAM */
5629 u32 icr = rd32(E1000_ICR);
5630
Alexander Duyck047e0032009-10-27 15:49:27 +00005631 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005632
Alexander Duyck7f081d42010-01-07 17:41:00 +00005633 if (icr & E1000_ICR_DRSTA)
5634 schedule_work(&adapter->reset_task);
5635
Alexander Duyck047e0032009-10-27 15:49:27 +00005636 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005637 /* HW is reporting DMA is out of sync */
5638 adapter->stats.doosync++;
5639 }
5640
Auke Kok9d5c8242008-01-24 02:22:38 -08005641 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5642 hw->mac.get_link_status = 1;
5643 if (!test_bit(__IGB_DOWN, &adapter->state))
5644 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5645 }
5646
Alexander Duyck047e0032009-10-27 15:49:27 +00005647 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005648
5649 return IRQ_HANDLED;
5650}
5651
5652/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005653 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005654 * @irq: interrupt number
5655 * @data: pointer to a network interface device structure
5656 **/
5657static irqreturn_t igb_intr(int irq, void *data)
5658{
Alexander Duyck047e0032009-10-27 15:49:27 +00005659 struct igb_adapter *adapter = data;
5660 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005661 struct e1000_hw *hw = &adapter->hw;
5662 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5663 * need for the IMC write */
5664 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005665
5666 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5667 * not set, then the adapter didn't send an interrupt */
5668 if (!(icr & E1000_ICR_INT_ASSERTED))
5669 return IRQ_NONE;
5670
Alexander Duyck0ba82992011-08-26 07:45:47 +00005671 igb_write_itr(q_vector);
5672
Alexander Duyck7f081d42010-01-07 17:41:00 +00005673 if (icr & E1000_ICR_DRSTA)
5674 schedule_work(&adapter->reset_task);
5675
Alexander Duyck047e0032009-10-27 15:49:27 +00005676 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005677 /* HW is reporting DMA is out of sync */
5678 adapter->stats.doosync++;
5679 }
5680
Auke Kok9d5c8242008-01-24 02:22:38 -08005681 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5682 hw->mac.get_link_status = 1;
5683 /* guard against interrupt when we're going down */
5684 if (!test_bit(__IGB_DOWN, &adapter->state))
5685 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5686 }
5687
Alexander Duyck047e0032009-10-27 15:49:27 +00005688 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005689
5690 return IRQ_HANDLED;
5691}
5692
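/**
 * igb_ring_irq_enable - update ITR if needed and re-arm the interrupt
 * @q_vector: vector whose interrupt is being re-enabled
 **/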
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00005693static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005694{
Alexander Duyck047e0032009-10-27 15:49:27 +00005695 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005696 struct e1000_hw *hw = &adapter->hw;
5697
Alexander Duyck0ba82992011-08-26 07:45:47 +00005698 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5699 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5700 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5701 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005702 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005703 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005704 }
5705
5706 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5707 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005708 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005709 else
5710 igb_irq_enable(adapter);
5711 }
5712}
5713
Auke Kok9d5c8242008-01-24 02:22:38 -08005714/**
 * igb_poll - NAPI polling callback
5716 * @napi: napi polling structure
5717 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005718 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005719static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005720{
Alexander Duyck047e0032009-10-27 15:49:27 +00005721 struct igb_q_vector *q_vector = container_of(napi,
5722 struct igb_q_vector,
5723 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005724 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005725
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005726#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005727 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5728 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005729#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005730 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005731 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005732
Alexander Duyck0ba82992011-08-26 07:45:47 +00005733 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005734 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005735
Alexander Duyck16eb8812011-08-26 07:43:54 +00005736 /* If all work not completed, return budget and keep polling */
5737 if (!clean_complete)
5738 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005739
	/* all work completed; exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005741 napi_complete(napi);
5742 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005743
Alexander Duyck16eb8812011-08-26 07:43:54 +00005744 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005745}
Al Viro6d8126f2008-03-16 22:23:24 +00005746
Richard Cochran7ebae812012-03-16 10:55:37 +00005747#ifdef CONFIG_IGB_PTP
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005748/**
5749 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5750 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_tx_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005752 *
5753 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
5756 */
Alexander Duyck06034642011-08-26 07:44:22 +00005757static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5758 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005759{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005760 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005761 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005762 struct skb_shared_hwtstamps shhwtstamps;
5763 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005764
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005765 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005766 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005767 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5768 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005769
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005770 regval = rd32(E1000_TXSTMPL);
5771 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5772
5773 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005774 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005775}
5776
Richard Cochran7ebae812012-03-16 10:55:37 +00005777#endif
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005778/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005779 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005780 * @q_vector: pointer to q_vector containing needed info
Ben Hutchings49ce9c22012-07-10 10:56:00 +00005781 *
Auke Kok9d5c8242008-01-24 02:22:38 -08005782 * returns true if ring is completely cleaned
5783 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005784static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005785{
Alexander Duyck047e0032009-10-27 15:49:27 +00005786 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005787 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005788 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005789 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005790 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005791 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005792 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005793
Alexander Duyck13fde972011-10-05 13:35:24 +00005794 if (test_bit(__IGB_DOWN, &adapter->state))
5795 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005796
Alexander Duyck06034642011-08-26 07:44:22 +00005797 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005798 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005799 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005800
Alexander Duyck13fde972011-10-05 13:35:24 +00005801 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005802 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005803
Alexander Duyck8542db02011-08-26 07:44:43 +00005804 /* prevent any other reads prior to eop_desc */
5805 rmb();
5806
5807 /* if next_to_watch is not set then there is no work pending */
5808 if (!eop_desc)
5809 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005810
5811 /* if DD is not set pending work has not been completed */
5812 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5813 break;
5814
Alexander Duyck8542db02011-08-26 07:44:43 +00005815 /* clear next_to_watch to prevent false hangs */
5816 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005817
Alexander Duyckebe42d12011-08-26 07:45:09 +00005818 /* update the statistics for this packet */
5819 total_bytes += tx_buffer->bytecount;
5820 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005821
Richard Cochran7ebae812012-03-16 10:55:37 +00005822#ifdef CONFIG_IGB_PTP
Alexander Duyckebe42d12011-08-26 07:45:09 +00005823 /* retrieve hardware timestamp */
5824 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005825
Richard Cochran7ebae812012-03-16 10:55:37 +00005826#endif
Alexander Duyckebe42d12011-08-26 07:45:09 +00005827 /* free the skb */
5828 dev_kfree_skb_any(tx_buffer->skb);
5829 tx_buffer->skb = NULL;
5830
5831 /* unmap skb header data */
5832 dma_unmap_single(tx_ring->dev,
5833 tx_buffer->dma,
5834 tx_buffer->length,
5835 DMA_TO_DEVICE);
5836
5837 /* clear last DMA location and unmap remaining buffers */
5838 while (tx_desc != eop_desc) {
5839 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005840
Alexander Duyck13fde972011-10-05 13:35:24 +00005841 tx_buffer++;
5842 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005843 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005844 if (unlikely(!i)) {
5845 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005846 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005847 tx_desc = IGB_TX_DESC(tx_ring, 0);
5848 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005849
5850 /* unmap any remaining paged data */
5851 if (tx_buffer->dma) {
5852 dma_unmap_page(tx_ring->dev,
5853 tx_buffer->dma,
5854 tx_buffer->length,
5855 DMA_TO_DEVICE);
5856 }
5857 }
5858
5859 /* clear last DMA location */
5860 tx_buffer->dma = 0;
5861
5862 /* move us one more past the eop_desc for start of next pkt */
5863 tx_buffer++;
5864 tx_desc++;
5865 i++;
5866 if (unlikely(!i)) {
5867 i -= tx_ring->count;
5868 tx_buffer = tx_ring->tx_buffer_info;
5869 tx_desc = IGB_TX_DESC(tx_ring, 0);
5870 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005871 }
5872
Eric Dumazetbdbc0632012-01-04 20:23:36 +00005873 netdev_tx_completed_queue(txring_txq(tx_ring),
5874 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00005875 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005876 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005877 u64_stats_update_begin(&tx_ring->tx_syncp);
5878 tx_ring->tx_stats.bytes += total_bytes;
5879 tx_ring->tx_stats.packets += total_packets;
5880 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005881 q_vector->tx.total_bytes += total_bytes;
5882 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005883
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005884 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005885 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005886
Alexander Duyck8542db02011-08-26 07:44:43 +00005887 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005888
		/* Detect a transmit hang in hardware; this serializes the
5890 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005891 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyck8542db02011-08-26 07:44:43 +00005892 if (eop_desc &&
5893 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005894 (adapter->tx_timeout_factor * HZ)) &&
5895 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005896
Auke Kok9d5c8242008-01-24 02:22:38 -08005897 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005898 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005899 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005900 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005901 " TDH <%x>\n"
5902 " TDT <%x>\n"
5903 " next_to_use <%x>\n"
5904 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005905 "buffer_info[next_to_clean]\n"
5906 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005907 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005908 " jiffies <%lx>\n"
5909 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005910 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005911 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005912 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005913 tx_ring->next_to_use,
5914 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005915 tx_buffer->time_stamp,
5916 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005917 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005918 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005919 netif_stop_subqueue(tx_ring->netdev,
5920 tx_ring->queue_index);
5921
5922 /* we are about to reset, no point in enabling stuff */
5923 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005924 }
5925 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005926
5927 if (unlikely(total_packets &&
5928 netif_carrier_ok(tx_ring->netdev) &&
5929 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5930 /* Make sure that anybody stopping the queue after this
5931 * sees the new next_to_clean.
5932 */
5933 smp_mb();
5934 if (__netif_subqueue_stopped(tx_ring->netdev,
5935 tx_ring->queue_index) &&
5936 !(test_bit(__IGB_DOWN, &adapter->state))) {
5937 netif_wake_subqueue(tx_ring->netdev,
5938 tx_ring->queue_index);
5939
5940 u64_stats_update_begin(&tx_ring->tx_syncp);
5941 tx_ring->tx_stats.restart_queue++;
5942 u64_stats_update_end(&tx_ring->tx_syncp);
5943 }
5944 }
5945
5946 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005947}
5948
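/**
 * igb_rx_checksum - set the skb checksum state from descriptor status
 * @ring: ring on which the packet was received
 * @rx_desc: receive descriptor carrying the status/error bits
 * @skb: packet being marked
 **/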
Alexander Duyckcd392f52011-08-26 07:43:59 +00005949static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005950 union e1000_adv_rx_desc *rx_desc,
5951 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005952{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005953 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005954
Alexander Duyck294e7d72011-08-26 07:45:57 +00005955 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005956 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00005957 return;
5958
5959 /* Rx checksum disabled via ethtool */
5960 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005961 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005962
Auke Kok9d5c8242008-01-24 02:22:38 -08005963 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005964 if (igb_test_staterr(rx_desc,
5965 E1000_RXDEXT_STATERR_TCPE |
5966 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005967 /*
5968 * work around errata with sctp packets where the TCPE aka
5969 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (i.e. let the stack check the crc32c)
5971 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005972 if (!((skb->len == 60) &&
5973 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005974 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005975 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005976 u64_stats_update_end(&ring->rx_syncp);
5977 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005978 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005979 return;
5980 }
5981 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005982 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5983 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08005984 skb->ip_summed = CHECKSUM_UNNECESSARY;
5985
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005986 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5987 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08005988}
5989
Alexander Duyck077887c2011-08-26 07:46:29 +00005990static inline void igb_rx_hash(struct igb_ring *ring,
5991 union e1000_adv_rx_desc *rx_desc,
5992 struct sk_buff *skb)
5993{
5994 if (ring->netdev->features & NETIF_F_RXHASH)
5995 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5996}
5997
Richard Cochran7ebae812012-03-16 10:55:37 +00005998#ifdef CONFIG_IGB_PTP
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005999static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
6000 union e1000_adv_rx_desc *rx_desc,
6001 struct sk_buff *skb)
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006002{
6003 struct igb_adapter *adapter = q_vector->adapter;
6004 struct e1000_hw *hw = &adapter->hw;
6005 u64 regval;
6006
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006007 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
6008 E1000_RXDADV_STAT_TS))
6009 return;
6010
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006011 /*
6012 * If this bit is set, then the RX registers contain the time stamp. No
6013 * other packet will be time stamped until we read these registers, so
6014 * read the registers to make them available again. Because only one
6015 * packet can be time stamped at a time, we know that the register
6016 * values must belong to this one here and therefore we don't need to
6017 * compare any of the additional attributes stored for it.
6018 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00006019 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006020 * can turn into a skb_shared_hwtstamps.
6021 */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006022 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
Nick Nunley757b77e2010-03-26 11:36:47 +00006023 u32 *stamp = (u32 *)skb->data;
6024 regval = le32_to_cpu(*(stamp + 2));
6025 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
6026 skb_pull(skb, IGB_TS_HDR_LEN);
6027 } else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
6029 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006030
Nick Nunley757b77e2010-03-26 11:36:47 +00006031 regval = rd32(E1000_RXSTMPL);
6032 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
6033 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006034
6035 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
6036}
Alexander Duyck8be10e92011-08-26 07:47:11 +00006037
Richard Cochran7ebae812012-03-16 10:55:37 +00006038#endif
Alexander Duyck8be10e92011-08-26 07:47:11 +00006039static void igb_rx_vlan(struct igb_ring *ring,
6040 union e1000_adv_rx_desc *rx_desc,
6041 struct sk_buff *skb)
6042{
6043 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6044 u16 vid;
6045 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6046 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
6047 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6048 else
6049 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6050
6051 __vlan_hwaccel_put_tag(skb, vid);
6052 }
6053}
6054
Alexander Duyck44390ca2011-08-26 07:43:38 +00006055static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006056{
6057 /* HW will not DMA in data larger than the given buffer, even if it
6058 * parses the (NFS, of course) header to be larger. In that case, it
6059 * fills the header buffer and spills the rest into the page.
6060 */
6061 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
6062 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00006063 if (hlen > IGB_RX_HDR_LEN)
6064 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006065 return hlen;
6066}
6067
Alexander Duyckcd392f52011-08-26 07:43:59 +00006068static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08006069{
Alexander Duyck0ba82992011-08-26 07:45:47 +00006070 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006071 union e1000_adv_rx_desc *rx_desc;
6072 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08006073 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006074 u16 cleaned_count = igb_desc_unused(rx_ring);
6075 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08006076
Alexander Duyck601369062011-08-26 07:44:05 +00006077 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08006078
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006079 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006080 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00006081 struct sk_buff *skb = buffer_info->skb;
6082 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006083
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006084 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006085 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006086
6087 i++;
6088 if (i == rx_ring->count)
6089 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00006090
Alexander Duyck601369062011-08-26 07:44:05 +00006091 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006092 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006093
Alexander Duyck16eb8812011-08-26 07:43:54 +00006094 /*
6095 * This memory barrier is needed to keep us from reading
6096 * any other fields out of the rx_desc until we know the
6097 * RXD_STAT_DD bit is set
6098 */
6099 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006100
Alexander Duyck16eb8812011-08-26 07:43:54 +00006101 if (!skb_is_nonlinear(skb)) {
6102 __skb_put(skb, igb_get_hlen(rx_desc));
6103 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00006104 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00006105 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00006106 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006107 }
6108
Alexander Duyck16eb8812011-08-26 07:43:54 +00006109 if (rx_desc->wb.upper.length) {
6110 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006111
Koki Sanagiaa913402010-04-27 01:01:19 +00006112 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006113 buffer_info->page,
6114 buffer_info->page_offset,
6115 length);
6116
Alexander Duyck16eb8812011-08-26 07:43:54 +00006117 skb->len += length;
6118 skb->data_len += length;
Eric Dumazet95b9c1d2011-10-13 07:56:41 +00006119 skb->truesize += PAGE_SIZE / 2;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006120
Alexander Duyckd1eff352009-11-12 18:38:35 +00006121 if ((page_count(buffer_info->page) != 1) ||
6122 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006123 buffer_info->page = NULL;
6124 else
6125 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08006126
Alexander Duyck16eb8812011-08-26 07:43:54 +00006127 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
6128 PAGE_SIZE / 2, DMA_FROM_DEVICE);
6129 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006130 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006131
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006132 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006133 struct igb_rx_buffer *next_buffer;
6134 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08006135 buffer_info->skb = next_buffer->skb;
6136 buffer_info->dma = next_buffer->dma;
6137 next_buffer->skb = skb;
6138 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006139 goto next_desc;
6140 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00006141
Ben Greear89eaefb2012-03-06 09:41:58 +00006142 if (unlikely((igb_test_staterr(rx_desc,
6143 E1000_RXDEXT_ERR_FRAME_ERR_MASK))
6144 && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00006145 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006146 goto next_desc;
6147 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006148
Richard Cochran7ebae812012-03-16 10:55:37 +00006149#ifdef CONFIG_IGB_PTP
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006150 igb_rx_hwtstamp(q_vector, rx_desc, skb);
Richard Cochran7ebae812012-03-16 10:55:37 +00006151#endif
Alexander Duyck077887c2011-08-26 07:46:29 +00006152 igb_rx_hash(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006153 igb_rx_checksum(rx_ring, rx_desc, skb);
Alexander Duyck8be10e92011-08-26 07:47:11 +00006154 igb_rx_vlan(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006155
6156 total_bytes += skb->len;
6157 total_packets++;
6158
6159 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6160
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006161 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006162
Alexander Duyck16eb8812011-08-26 07:43:54 +00006163 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08006164next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00006165 if (!budget)
6166 break;
6167
6168 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006169 /* return some buffers to hardware, one at a time is too slow */
6170 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00006171 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006172 cleaned_count = 0;
6173 }
6174
6175 /* use prefetched values */
6176 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006177 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006178
Auke Kok9d5c8242008-01-24 02:22:38 -08006179 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006180 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006181 rx_ring->rx_stats.packets += total_packets;
6182 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006183 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006184 q_vector->rx.total_packets += total_packets;
6185 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006186
6187 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006188 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006189
Alexander Duyck16eb8812011-08-26 07:43:54 +00006190 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006191}
6192
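/**
 * igb_alloc_mapped_skb - ensure an rx buffer has a DMA-mapped header skb
 * @rx_ring: ring the buffer belongs to
 * @bi: buffer info entry to populate
 *
 * Returns false and increments alloc_failed if the skb allocation or
 * DMA mapping fails.
 **/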
Alexander Duyckc023cd82011-08-26 07:43:43 +00006193static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006194 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006195{
6196 struct sk_buff *skb = bi->skb;
6197 dma_addr_t dma = bi->dma;
6198
6199 if (dma)
6200 return true;
6201
6202 if (likely(!skb)) {
6203 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6204 IGB_RX_HDR_LEN);
6205 bi->skb = skb;
6206 if (!skb) {
6207 rx_ring->rx_stats.alloc_failed++;
6208 return false;
6209 }
6210
6211 /* initialize skb for ring */
6212 skb_record_rx_queue(skb, rx_ring->queue_index);
6213 }
6214
6215 dma = dma_map_single(rx_ring->dev, skb->data,
6216 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6217
6218 if (dma_mapping_error(rx_ring->dev, dma)) {
6219 rx_ring->rx_stats.alloc_failed++;
6220 return false;
6221 }
6222
6223 bi->dma = dma;
6224 return true;
6225}
6226
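/**
 * igb_alloc_mapped_page - ensure an rx buffer has a DMA-mapped half page
 * @rx_ring: ring the buffer belongs to
 * @bi: buffer info entry to populate
 **/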
6227static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006228 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006229{
6230 struct page *page = bi->page;
6231 dma_addr_t page_dma = bi->page_dma;
6232 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6233
6234 if (page_dma)
6235 return true;
6236
6237 if (!page) {
Mel Gorman06140022012-07-31 16:44:24 -07006238 page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006239 bi->page = page;
6240 if (unlikely(!page)) {
6241 rx_ring->rx_stats.alloc_failed++;
6242 return false;
6243 }
6244 }
6245
6246 page_dma = dma_map_page(rx_ring->dev, page,
6247 page_offset, PAGE_SIZE / 2,
6248 DMA_FROM_DEVICE);
6249
6250 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6251 rx_ring->rx_stats.alloc_failed++;
6252 return false;
6253 }
6254
6255 bi->page_dma = page_dma;
6256 bi->page_offset = page_offset;
6257 return true;
6258}
6259
Auke Kok9d5c8242008-01-24 02:22:38 -08006260/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006261 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: pointer to the rx descriptor ring to refill
 * @cleaned_count: number of buffers to allocate
6263 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006264void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006265{
Auke Kok9d5c8242008-01-24 02:22:38 -08006266 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006267 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006268 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006269
Alexander Duyck601369062011-08-26 07:44:05 +00006270 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006271 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006272 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006273
6274 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006275 if (!igb_alloc_mapped_skb(rx_ring, bi))
6276 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006277
Alexander Duyckc023cd82011-08-26 07:43:43 +00006278 /* Refresh the desc even if buffer_addrs didn't change
6279 * because each write-back erases this info. */
6280 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006281
Alexander Duyckc023cd82011-08-26 07:43:43 +00006282 if (!igb_alloc_mapped_page(rx_ring, bi))
6283 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006284
Alexander Duyckc023cd82011-08-26 07:43:43 +00006285 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006286
Alexander Duyckc023cd82011-08-26 07:43:43 +00006287 rx_desc++;
6288 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006289 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006290 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006291 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006292 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006293 i -= rx_ring->count;
6294 }
6295
6296 /* clear the hdr_addr for the next_to_use descriptor */
6297 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006298 }
6299
Alexander Duyckc023cd82011-08-26 07:43:43 +00006300 i += rx_ring->count;
6301
Auke Kok9d5c8242008-01-24 02:22:38 -08006302 if (rx_ring->next_to_use != i) {
6303 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006304
6305 /* Force memory writes to complete before letting h/w
6306 * know there are new descriptors to fetch. (Only
6307 * applicable for weak-ordered memory model archs,
6308 * such as IA-64). */
6309 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006310 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006311 }
6312}
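/*
 * Illustrative sketch, not driver code: igb_alloc_rx_buffers() biases
 * its u16 index by -ring->count so the wrap test is a cheap "if (!i)"
 * instead of a compare against count on every descriptor. The bias is
 * removed again before next_to_use is written back.
 */
#include <stdio.h>

#define RING_COUNT 8			/* assumed example ring size */

int main(void)
{
	unsigned short i = 5;		/* next_to_use */
	int n;

	i -= RING_COUNT;		/* bias: i now counts up towards 0 */
	for (n = 0; n < 6; n++) {	/* refill six descriptors */
		i++;
		if (!i)			/* hit the end of the ring */
			i -= RING_COUNT;
	}
	i += RING_COUNT;		/* remove the bias */
	printf("next_to_use = %u\n", i);	/* (5 + 6) % 8 == 3 */
	return 0;
}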
6313
6314/**
 6315 * igb_mii_ioctl - handle MII ioctl requests
 6316 * @netdev: network interface device structure
 6317 * @ifr: pointer to the user ioctl request
 6318 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
6319 **/
6320static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6321{
6322 struct igb_adapter *adapter = netdev_priv(netdev);
6323 struct mii_ioctl_data *data = if_mii(ifr);
6324
6325 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6326 return -EOPNOTSUPP;
6327
6328 switch (cmd) {
6329 case SIOCGMIIPHY:
6330 data->phy_id = adapter->hw.phy.addr;
6331 break;
6332 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006333 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6334 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006335 return -EIO;
6336 break;
6337 case SIOCSMIIREG:
6338 default:
6339 return -EOPNOTSUPP;
6340 }
6341 return 0;
6342}
6343
6344/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006345 * igb_hwtstamp_ioctl - control hardware time stamping
 6346 * @netdev: network interface device structure
 6347 * @ifr: pointer to the user ioctl request
 6348 * @cmd: ioctl command (SIOCSHWTSTAMP)
6349 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006350 * Outgoing time stamping can be enabled and disabled. Play nice and
 6351 * disable it when requested, although it shouldn't cause any overhead
6352 * when no packet needs it. At most one packet in the queue may be
6353 * marked for time stamping, otherwise it would be impossible to tell
6354 * for sure to which packet the hardware time stamp belongs.
6355 *
6356 * Incoming time stamping has to be configured via the hardware
6357 * filters. Not all combinations are supported, in particular event
6358 * type has to be specified. Matching the kind of event packet is
6359 * not supported, with the exception of "all V2 events regardless of
 6360 * layer 2 or 4".
6361 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006362 **/
6363static int igb_hwtstamp_ioctl(struct net_device *netdev,
6364 struct ifreq *ifr, int cmd)
6365{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006366 struct igb_adapter *adapter = netdev_priv(netdev);
6367 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006368 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006369 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6370 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006371 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006372 bool is_l4 = false;
6373 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006374 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006375
6376 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6377 return -EFAULT;
6378
6379 /* reserved for future extensions */
6380 if (config.flags)
6381 return -EINVAL;
6382
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006383 switch (config.tx_type) {
6384 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006385 tsync_tx_ctl = 0;
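		/* fall through */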
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006386 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006387 break;
6388 default:
6389 return -ERANGE;
6390 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006391
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006392 switch (config.rx_filter) {
6393 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006394 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006395 break;
6396 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6397 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6398 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6399 case HWTSTAMP_FILTER_ALL:
6400 /*
6401 * register TSYNCRXCFG must be set, therefore it is not
6402 * possible to time stamp both Sync and Delay_Req messages
6403 * => fall back to time stamping all packets
6404 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006405 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006406 config.rx_filter = HWTSTAMP_FILTER_ALL;
6407 break;
6408 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006409 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006410 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006411 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006412 break;
6413 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006414 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006415 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006416 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006417 break;
6418 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6419 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006420 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006421 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006422 is_l2 = true;
6423 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006424 config.rx_filter = HWTSTAMP_FILTER_SOME;
6425 break;
6426 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6427 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006428 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006429 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006430 is_l2 = true;
6431 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006432 config.rx_filter = HWTSTAMP_FILTER_SOME;
6433 break;
6434 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6435 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6436 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006437 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006438 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006439 is_l2 = true;
Jacob Keller11ba69e2011-10-12 00:51:54 +00006440 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006441 break;
6442 default:
6443 return -ERANGE;
6444 }
6445
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006446 if (hw->mac.type == e1000_82575) {
6447 if (tsync_rx_ctl | tsync_tx_ctl)
6448 return -EINVAL;
6449 return 0;
6450 }
6451
Nick Nunley757b77e2010-03-26 11:36:47 +00006452 /*
6453 * Per-packet timestamping only works if all packets are
6454 * timestamped, so enable timestamping in all packets as
6455 * long as one rx filter was configured.
6456 */
Alexander Duyck06218a82011-08-26 07:46:55 +00006457 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
Nick Nunley757b77e2010-03-26 11:36:47 +00006458 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6459 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6460 }
6461
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006462 /* enable/disable TX */
6463 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006464 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6465 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006466 wr32(E1000_TSYNCTXCTL, regval);
6467
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006468 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006469 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006470 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6471 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006472 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006473
6474 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006475 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6476
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006477 /* define ethertype filter for timestamped packets */
6478 if (is_l2)
6479 wr32(E1000_ETQF(3),
6480 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6481 E1000_ETQF_1588 | /* enable timestamping */
6482 ETH_P_1588)); /* 1588 eth protocol type */
6483 else
6484 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006485
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006486#define PTP_PORT 319
6487 /* L4 Queue Filter[3]: filter by destination port and protocol */
6488 if (is_l4) {
6489 u32 ftqf = (IPPROTO_UDP /* UDP */
6490 | E1000_FTQF_VF_BP /* VF not compared */
6491 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6492 | E1000_FTQF_MASK); /* mask all inputs */
6493 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006494
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006495 wr32(E1000_IMIR(3), htons(PTP_PORT));
6496 wr32(E1000_IMIREXT(3),
6497 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6498 if (hw->mac.type == e1000_82576) {
6499 /* enable source port check */
6500 wr32(E1000_SPQF(3), htons(PTP_PORT));
6501 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6502 }
6503 wr32(E1000_FTQF(3), ftqf);
6504 } else {
6505 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6506 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006507 wrfl();
6508
6509 adapter->hwtstamp_config = config;
6510
6511 /* clear TX/RX time stamp registers, just to be sure */
6512 regval = rd32(E1000_TXSTMPH);
6513 regval = rd32(E1000_RXSTMPH);
6514
6515 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6516 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006517}
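/*
 * Illustrative userspace sketch, not driver code: exercising the
 * SIOCSHWTSTAMP path handled above. The interface name "eth0" is an
 * assumption; note the driver may widen the granted rx_filter (e.g. to
 * HWTSTAMP_FILTER_ALL on 82580 parts) and copies the result back.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter = %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}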
6518
6519/**
Auke Kok9d5c8242008-01-24 02:22:38 -08006520 * igb_ioctl - handle device-specific ioctl requests
 6521 * @netdev: network interface device structure
 6522 * @ifr: pointer to the user ioctl request
 6523 * @cmd: ioctl command
6524 **/
6525static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6526{
6527 switch (cmd) {
6528 case SIOCGMIIPHY:
6529 case SIOCGMIIREG:
6530 case SIOCSMIIREG:
6531 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006532 case SIOCSHWTSTAMP:
6533 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006534 default:
6535 return -EOPNOTSUPP;
6536 }
6537}
6538
Alexander Duyck009bc062009-07-23 18:08:35 +00006539s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6540{
6541 struct igb_adapter *adapter = hw->back;
6542 u16 cap_offset;
6543
Jon Masonbdaae042011-06-27 07:44:01 +00006544 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006545 if (!cap_offset)
6546 return -E1000_ERR_CONFIG;
6547
6548 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6549
6550 return 0;
6551}
6552
6553s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6554{
6555 struct igb_adapter *adapter = hw->back;
6556 u16 cap_offset;
6557
Jon Masonbdaae042011-06-27 07:44:01 +00006558 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006559 if (!cap_offset)
6560 return -E1000_ERR_CONFIG;
6561
6562 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6563
6564 return 0;
6565}
6566
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006567static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006568{
6569 struct igb_adapter *adapter = netdev_priv(netdev);
6570 struct e1000_hw *hw = &adapter->hw;
6571 u32 ctrl, rctl;
Alexander Duyck5faf0302011-08-26 07:46:08 +00006572 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08006573
Alexander Duyck5faf0302011-08-26 07:46:08 +00006574 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006575 /* enable VLAN tag insert/strip */
6576 ctrl = rd32(E1000_CTRL);
6577 ctrl |= E1000_CTRL_VME;
6578 wr32(E1000_CTRL, ctrl);
6579
Alexander Duyck51466232009-10-27 23:47:35 +00006580 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006581 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006582 rctl &= ~E1000_RCTL_CFIEN;
6583 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006584 } else {
6585 /* disable VLAN tag insert/strip */
6586 ctrl = rd32(E1000_CTRL);
6587 ctrl &= ~E1000_CTRL_VME;
6588 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006589 }
6590
Alexander Duycke1739522009-02-19 20:39:44 -08006591 igb_rlpml_set(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006592}
6593
Jiri Pirko8e586132011-12-08 19:52:37 -05006594static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006595{
6596 struct igb_adapter *adapter = netdev_priv(netdev);
6597 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006598 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006599
Alexander Duyck51466232009-10-27 23:47:35 +00006600 /* attempt to add filter to vlvf array */
6601 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006602
Alexander Duyck51466232009-10-27 23:47:35 +00006603 /* add the filter since PF can receive vlans w/o entry in vlvf */
6604 igb_vfta_set(hw, vid, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006605
6606 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006607
6608 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006609}
6610
Jiri Pirko8e586132011-12-08 19:52:37 -05006611static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006612{
6613 struct igb_adapter *adapter = netdev_priv(netdev);
6614 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006615 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006616 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006617
Alexander Duyck51466232009-10-27 23:47:35 +00006618 /* remove vlan from VLVF table array */
6619 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006620
Alexander Duyck51466232009-10-27 23:47:35 +00006621 /* if vid was not present in VLVF just remove it from table */
6622 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006623 igb_vfta_set(hw, vid, false);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006624
6625 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006626
6627 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006628}
6629
6630static void igb_restore_vlan(struct igb_adapter *adapter)
6631{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006632 u16 vid;
Auke Kok9d5c8242008-01-24 02:22:38 -08006633
Alexander Duyck5faf0302011-08-26 07:46:08 +00006634 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6635
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006636 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6637 igb_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08006638}
6639
David Decotigny14ad2512011-04-27 18:32:43 +00006640int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08006641{
Alexander Duyck090b1792009-10-27 23:51:55 +00006642 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08006643 struct e1000_mac_info *mac = &adapter->hw.mac;
6644
6645 mac->autoneg = 0;
6646
David Decotigny14ad2512011-04-27 18:32:43 +00006647 /* Make sure dplx is at most 1 bit and the lsb of speed is not set,
 6648 * so each valid spd + dplx sum in the switch() below is unique */
6649 if ((spd & 1) || (dplx & ~1))
6650 goto err_inval;
6651
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006652 /* Fiber NICs only allow 1000 Mbps full duplex */
6653 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
David Decotigny14ad2512011-04-27 18:32:43 +00006654 spd != SPEED_1000 &&
6655 dplx != DUPLEX_FULL)
6656 goto err_inval;
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006657
David Decotigny14ad2512011-04-27 18:32:43 +00006658 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006659 case SPEED_10 + DUPLEX_HALF:
6660 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6661 break;
6662 case SPEED_10 + DUPLEX_FULL:
6663 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6664 break;
6665 case SPEED_100 + DUPLEX_HALF:
6666 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6667 break;
6668 case SPEED_100 + DUPLEX_FULL:
6669 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6670 break;
6671 case SPEED_1000 + DUPLEX_FULL:
6672 mac->autoneg = 1;
6673 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6674 break;
6675 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6676 default:
David Decotigny14ad2512011-04-27 18:32:43 +00006677 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08006678 }
Jesse Brandeburg8376dad2012-07-26 02:31:19 +00006679
6680 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6681 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6682
Auke Kok9d5c8242008-01-24 02:22:38 -08006683 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00006684
6685err_inval:
6686 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6687 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08006688}
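/*
 * Illustrative sketch, not driver code: why the (spd & 1) || (dplx & ~1)
 * guard in igb_set_spd_dplx() is enough. SPEED_* values are even and
 * DUPLEX_* is 0 or 1, so every valid spd + dplx sum is a distinct case
 * label in the switch above.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int speeds[] = { 10, 100, 1000 };  /* SPEED_10/100/1000 */
	unsigned int dplx;
	int s;

	for (s = 0; s < 3; s++)
		for (dplx = 0; dplx <= 1; dplx++)  /* DUPLEX_HALF/DUPLEX_FULL */
			printf("spd + dplx = %u\n", speeds[s] + dplx);
	/* 10, 11, 100, 101, 1000, 1001: six unambiguous combinations */
	return 0;
}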
6689
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006690static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
6691 bool runtime)
Auke Kok9d5c8242008-01-24 02:22:38 -08006692{
6693 struct net_device *netdev = pci_get_drvdata(pdev);
6694 struct igb_adapter *adapter = netdev_priv(netdev);
6695 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07006696 u32 ctrl, rctl, status;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006697 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
Auke Kok9d5c8242008-01-24 02:22:38 -08006698#ifdef CONFIG_PM
6699 int retval = 0;
6700#endif
6701
6702 netif_device_detach(netdev);
6703
Alexander Duycka88f10e2008-07-08 15:13:38 -07006704 if (netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006705 __igb_close(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07006706
Alexander Duyck047e0032009-10-27 15:49:27 +00006707 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006708
6709#ifdef CONFIG_PM
6710 retval = pci_save_state(pdev);
6711 if (retval)
6712 return retval;
6713#endif
6714
6715 status = rd32(E1000_STATUS);
6716 if (status & E1000_STATUS_LU)
6717 wufc &= ~E1000_WUFC_LNKC;
6718
6719 if (wufc) {
6720 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006721 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006722
6723 /* turn on all-multi mode if wake on multicast is enabled */
6724 if (wufc & E1000_WUFC_MC) {
6725 rctl = rd32(E1000_RCTL);
6726 rctl |= E1000_RCTL_MPE;
6727 wr32(E1000_RCTL, rctl);
6728 }
6729
6730 ctrl = rd32(E1000_CTRL);
6731 /* advertise wake from D3Cold */
6732 #define E1000_CTRL_ADVD3WUC 0x00100000
6733 /* phy power management enable */
6734 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6735 ctrl |= E1000_CTRL_ADVD3WUC;
6736 wr32(E1000_CTRL, ctrl);
6737
Auke Kok9d5c8242008-01-24 02:22:38 -08006738 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00006739 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08006740
6741 wr32(E1000_WUC, E1000_WUC_PME_EN);
6742 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08006743 } else {
6744 wr32(E1000_WUC, 0);
6745 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08006746 }
6747
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006748 *enable_wake = wufc || adapter->en_mng_pt;
6749 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00006750 igb_power_down_link(adapter);
6751 else
6752 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006753
6754 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6755 * would have already happened in close and is redundant. */
6756 igb_release_hw_control(adapter);
6757
6758 pci_disable_device(pdev);
6759
Auke Kok9d5c8242008-01-24 02:22:38 -08006760 return 0;
6761}
6762
6763#ifdef CONFIG_PM
Emil Tantilovd9dd9662012-01-28 08:10:35 +00006764#ifdef CONFIG_PM_SLEEP
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006765static int igb_suspend(struct device *dev)
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006766{
6767 int retval;
6768 bool wake;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006769 struct pci_dev *pdev = to_pci_dev(dev);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006770
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006771 retval = __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006772 if (retval)
6773 return retval;
6774
6775 if (wake) {
6776 pci_prepare_to_sleep(pdev);
6777 } else {
6778 pci_wake_from_d3(pdev, false);
6779 pci_set_power_state(pdev, PCI_D3hot);
6780 }
6781
6782 return 0;
6783}
Emil Tantilovd9dd9662012-01-28 08:10:35 +00006784#endif /* CONFIG_PM_SLEEP */
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006785
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006786static int igb_resume(struct device *dev)
Auke Kok9d5c8242008-01-24 02:22:38 -08006787{
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006788 struct pci_dev *pdev = to_pci_dev(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006789 struct net_device *netdev = pci_get_drvdata(pdev);
6790 struct igb_adapter *adapter = netdev_priv(netdev);
6791 struct e1000_hw *hw = &adapter->hw;
6792 u32 err;
6793
6794 pci_set_power_state(pdev, PCI_D0);
6795 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006796 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006797
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006798 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006799 if (err) {
6800 dev_err(&pdev->dev,
6801 "igb: Cannot enable PCI device from suspend\n");
6802 return err;
6803 }
6804 pci_set_master(pdev);
6805
6806 pci_enable_wake(pdev, PCI_D3hot, 0);
6807 pci_enable_wake(pdev, PCI_D3cold, 0);
6808
Benjamin Poiriercfb8c3a2012-05-10 15:38:37 +00006809 if (igb_init_interrupt_scheme(adapter)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07006810 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6811 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08006812 }
6813
Auke Kok9d5c8242008-01-24 02:22:38 -08006814 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00006815
6816 /* let the f/w know that the h/w is now under the control of the
6817 * driver. */
6818 igb_get_hw_control(adapter);
6819
Auke Kok9d5c8242008-01-24 02:22:38 -08006820 wr32(E1000_WUS, ~0);
6821
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006822 if (netdev->flags & IFF_UP) {
6823 err = __igb_open(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07006824 if (err)
6825 return err;
6826 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006827
6828 netif_device_attach(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006829 return 0;
6830}
6831
6832#ifdef CONFIG_PM_RUNTIME
6833static int igb_runtime_idle(struct device *dev)
6834{
6835 struct pci_dev *pdev = to_pci_dev(dev);
6836 struct net_device *netdev = pci_get_drvdata(pdev);
6837 struct igb_adapter *adapter = netdev_priv(netdev);
6838
6839 if (!igb_has_link(adapter))
6840 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6841
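	/* vetoing with -EBUSY keeps the device active for now; the suspend
	 * scheduled above fires in five seconds if the link stays down */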
6842 return -EBUSY;
6843}
6844
6845static int igb_runtime_suspend(struct device *dev)
6846{
6847 struct pci_dev *pdev = to_pci_dev(dev);
6848 int retval;
6849 bool wake;
6850
6851 retval = __igb_shutdown(pdev, &wake, 1);
6852 if (retval)
6853 return retval;
6854
6855 if (wake) {
6856 pci_prepare_to_sleep(pdev);
6857 } else {
6858 pci_wake_from_d3(pdev, false);
6859 pci_set_power_state(pdev, PCI_D3hot);
6860 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006861
Auke Kok9d5c8242008-01-24 02:22:38 -08006862 return 0;
6863}
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006864
6865static int igb_runtime_resume(struct device *dev)
6866{
6867 return igb_resume(dev);
6868}
6869#endif /* CONFIG_PM_RUNTIME */
Auke Kok9d5c8242008-01-24 02:22:38 -08006870#endif
6871
6872static void igb_shutdown(struct pci_dev *pdev)
6873{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006874 bool wake;
6875
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006876 __igb_shutdown(pdev, &wake, 0);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006877
6878 if (system_state == SYSTEM_POWER_OFF) {
6879 pci_wake_from_d3(pdev, wake);
6880 pci_set_power_state(pdev, PCI_D3hot);
6881 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006882}
6883
6884#ifdef CONFIG_NET_POLL_CONTROLLER
6885/*
6886 * Polling 'interrupt' - used by things like netconsole to send skbs
6887 * without having to re-enable interrupts. It's not called while
6888 * the interrupt routine is executing.
6889 */
6890static void igb_netpoll(struct net_device *netdev)
6891{
6892 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006893 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00006894 struct igb_q_vector *q_vector;
Auke Kok9d5c8242008-01-24 02:22:38 -08006895 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006896
Alexander Duyck047e0032009-10-27 15:49:27 +00006897 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00006898 q_vector = adapter->q_vector[i];
6899 if (adapter->msix_entries)
6900 wr32(E1000_EIMC, q_vector->eims_value);
6901 else
6902 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00006903 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006904 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006905}
6906#endif /* CONFIG_NET_POLL_CONTROLLER */
6907
6908/**
6909 * igb_io_error_detected - called when PCI error is detected
6910 * @pdev: Pointer to PCI device
6911 * @state: The current pci connection state
6912 *
6913 * This function is called after a PCI bus error affecting
6914 * this device has been detected.
6915 */
6916static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6917 pci_channel_state_t state)
6918{
6919 struct net_device *netdev = pci_get_drvdata(pdev);
6920 struct igb_adapter *adapter = netdev_priv(netdev);
6921
6922 netif_device_detach(netdev);
6923
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00006924 if (state == pci_channel_io_perm_failure)
6925 return PCI_ERS_RESULT_DISCONNECT;
6926
Auke Kok9d5c8242008-01-24 02:22:38 -08006927 if (netif_running(netdev))
6928 igb_down(adapter);
6929 pci_disable_device(pdev);
6930
 6931 /* Request a slot reset. */
6932 return PCI_ERS_RESULT_NEED_RESET;
6933}
6934
6935/**
6936 * igb_io_slot_reset - called after the pci bus has been reset.
6937 * @pdev: Pointer to PCI device
6938 *
6939 * Restart the card from scratch, as if from a cold-boot. Implementation
6940 * resembles the first-half of the igb_resume routine.
6941 */
6942static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6943{
6944 struct net_device *netdev = pci_get_drvdata(pdev);
6945 struct igb_adapter *adapter = netdev_priv(netdev);
6946 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08006947 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006948 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006949
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006950 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006951 dev_err(&pdev->dev,
6952 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08006953 result = PCI_ERS_RESULT_DISCONNECT;
6954 } else {
6955 pci_set_master(pdev);
6956 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006957 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08006958
6959 pci_enable_wake(pdev, PCI_D3hot, 0);
6960 pci_enable_wake(pdev, PCI_D3cold, 0);
6961
6962 igb_reset(adapter);
6963 wr32(E1000_WUS, ~0);
6964 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08006965 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006966
Jeff Kirsherea943d42008-12-11 20:34:19 -08006967 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6968 if (err) {
 6969 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
 6970 "failed 0x%x\n", err);
6971 /* non-fatal, continue */
6972 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006973
Alexander Duyck40a914f2008-11-27 00:24:37 -08006974 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08006975}
6976
6977/**
6978 * igb_io_resume - called when traffic can start flowing again.
6979 * @pdev: Pointer to PCI device
6980 *
6981 * This callback is called when the error recovery driver tells us that
6982 * its OK to resume normal operation. Implementation resembles the
6983 * second-half of the igb_resume routine.
6984 */
6985static void igb_io_resume(struct pci_dev *pdev)
6986{
6987 struct net_device *netdev = pci_get_drvdata(pdev);
6988 struct igb_adapter *adapter = netdev_priv(netdev);
6989
Auke Kok9d5c8242008-01-24 02:22:38 -08006990 if (netif_running(netdev)) {
6991 if (igb_up(adapter)) {
6992 dev_err(&pdev->dev, "igb_up failed after reset\n");
6993 return;
6994 }
6995 }
6996
6997 netif_device_attach(netdev);
6998
6999 /* let the f/w know that the h/w is now under the control of the
7000 * driver. */
7001 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08007002}
7003
Alexander Duyck26ad9172009-10-05 06:32:49 +00007004static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7005 u8 qsel)
7006{
7007 u32 rar_low, rar_high;
7008 struct e1000_hw *hw = &adapter->hw;
7009
7010 /* HW expects these in little endian so we reverse the byte order
7011 * from network order (big endian) to little endian
7012 */
7013 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
7014 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
7015 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
7016
7017 /* Indicate to hardware the Address is Valid. */
7018 rar_high |= E1000_RAH_AV;
7019
7020 if (hw->mac.type == e1000_82575)
7021 rar_high |= E1000_RAH_POOL_1 * qsel;
7022 else
7023 rar_high |= E1000_RAH_POOL_1 << qsel;
7024
7025 wr32(E1000_RAL(index), rar_low);
7026 wrfl();
7027 wr32(E1000_RAH(index), rar_high);
7028 wrfl();
7029}
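/*
 * Illustrative sketch, not driver code: how igb_rar_set_qsel() folds a
 * network-order (big endian) MAC address into the little-endian RAL/RAH
 * register pair. The address below is an arbitrary example.
 */
#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	unsigned int rar_low, rar_high;

	rar_low = (unsigned int)addr[0] | ((unsigned int)addr[1] << 8) |
		  ((unsigned int)addr[2] << 16) | ((unsigned int)addr[3] << 24);
	rar_high = (unsigned int)addr[4] | ((unsigned int)addr[5] << 8);

	printf("RAL = 0x%08x, RAH = 0x%08x\n", rar_low, rar_high);
	/* RAL = 0xaa211b00, RAH = 0x0000ccbb: the first octet lands in the
	 * lowest byte, which is what the hardware expects */
	return 0;
}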
7030
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007031static int igb_set_vf_mac(struct igb_adapter *adapter,
7032 int vf, unsigned char *mac_addr)
7033{
7034 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00007035 /* VF MAC addresses start at the end of the receive address array
 7036 * and move towards the first entry, so a collision is not possible */
7037 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007038
Alexander Duyck37680112009-02-19 20:40:30 -08007039 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007040
Alexander Duyck26ad9172009-10-05 06:32:49 +00007041 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007042
7043 return 0;
7044}
7045
Williams, Mitch A8151d292010-02-10 01:44:24 +00007046static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7047{
7048 struct igb_adapter *adapter = netdev_priv(netdev);
7049 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
7050 return -EINVAL;
7051 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7052 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7053 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
7054 " change effective.");
7055 if (test_bit(__IGB_DOWN, &adapter->state)) {
7056 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
7057 " but the PF device is not up.\n");
7058 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
7059 " attempting to use the VF device.\n");
7060 }
7061 return igb_set_vf_mac(adapter, vf, mac);
7062}
7063
Lior Levy17dc5662011-02-08 02:28:46 +00007064static int igb_link_mbps(int internal_link_speed)
7065{
7066 switch (internal_link_speed) {
7067 case SPEED_100:
7068 return 100;
7069 case SPEED_1000:
7070 return 1000;
7071 default:
7072 return 0;
7073 }
7074}
7075
7076static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7077 int link_speed)
7078{
7079 int rf_dec, rf_int;
7080 u32 bcnrc_val;
7081
7082 if (tx_rate != 0) {
7083 /* Calculate the rate factor values to set */
7084 rf_int = link_speed / tx_rate;
7085 rf_dec = (link_speed - (rf_int * tx_rate));
7086 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
7087
7088 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
7089 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
7090 E1000_RTTBCNRC_RF_INT_MASK);
7091 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7092 } else {
7093 bcnrc_val = 0;
7094 }
7095
7096 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
Lior Levyf00b0da2011-06-04 06:05:03 +00007097 /*
7098 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7099 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7100 */
7101 wr32(E1000_RTTBCNRM, 0x14);
Lior Levy17dc5662011-02-08 02:28:46 +00007102 wr32(E1000_RTTBCNRC, bcnrc_val);
7103}
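/*
 * Illustrative sketch, not driver code: the fixed-point rate factor
 * programmed by igb_set_vf_rate_limit(). The 14-bit fractional shift is
 * assumed from E1000_RTTBCNRC_RF_INT_SHIFT; hardware then transmits at
 * roughly link_speed / (rf_int + rf_dec / 2^14).
 */
#include <stdio.h>

#define RF_INT_SHIFT 14			/* assumed E1000_RTTBCNRC_RF_INT_SHIFT */

int main(void)
{
	int link_speed = 1000;		/* Mbps */
	int tx_rate = 300;		/* requested VF limit, Mbps */
	int rf_int, rf_dec;

	rf_int = link_speed / tx_rate;			/* integer part: 3 */
	rf_dec = link_speed - rf_int * tx_rate;		/* remainder: 100 */
	rf_dec = (rf_dec * (1 << RF_INT_SHIFT)) / tx_rate;

	printf("factor = %d + %d/16384 ~= %.4f\n",
	       rf_int, rf_dec, rf_int + rf_dec / 16384.0);
	printf("effective rate ~= %.1f Mbps\n",
	       link_speed / (rf_int + rf_dec / 16384.0));
	return 0;
}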
7104
7105static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7106{
7107 int actual_link_speed, i;
7108 bool reset_rate = false;
7109
7110 /* VF TX rate limit was not set or not supported */
7111 if ((adapter->vf_rate_link_speed == 0) ||
7112 (adapter->hw.mac.type != e1000_82576))
7113 return;
7114
7115 actual_link_speed = igb_link_mbps(adapter->link_speed);
7116 if (actual_link_speed != adapter->vf_rate_link_speed) {
7117 reset_rate = true;
7118 adapter->vf_rate_link_speed = 0;
7119 dev_info(&adapter->pdev->dev,
7120 "Link speed has been changed. VF Transmit "
7121 "rate is disabled\n");
7122 }
7123
7124 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7125 if (reset_rate)
7126 adapter->vf_data[i].tx_rate = 0;
7127
7128 igb_set_vf_rate_limit(&adapter->hw, i,
7129 adapter->vf_data[i].tx_rate,
7130 actual_link_speed);
7131 }
7132}
7133
Williams, Mitch A8151d292010-02-10 01:44:24 +00007134static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7135{
Lior Levy17dc5662011-02-08 02:28:46 +00007136 struct igb_adapter *adapter = netdev_priv(netdev);
7137 struct e1000_hw *hw = &adapter->hw;
7138 int actual_link_speed;
7139
7140 if (hw->mac.type != e1000_82576)
7141 return -EOPNOTSUPP;
7142
7143 actual_link_speed = igb_link_mbps(adapter->link_speed);
7144 if ((vf >= adapter->vfs_allocated_count) ||
7145 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
7146 (tx_rate < 0) || (tx_rate > actual_link_speed))
7147 return -EINVAL;
7148
7149 adapter->vf_rate_link_speed = actual_link_speed;
7150 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
7151 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
7152
7153 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007154}
7155
7156static int igb_ndo_get_vf_config(struct net_device *netdev,
7157 int vf, struct ifla_vf_info *ivi)
7158{
7159 struct igb_adapter *adapter = netdev_priv(netdev);
7160 if (vf >= adapter->vfs_allocated_count)
7161 return -EINVAL;
7162 ivi->vf = vf;
7163 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00007164 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007165 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7166 ivi->qos = adapter->vf_data[vf].pf_qos;
7167 return 0;
7168}
7169
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007170static void igb_vmm_control(struct igb_adapter *adapter)
7171{
7172 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00007173 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007174
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007175 switch (hw->mac.type) {
7176 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00007177 case e1000_i210:
7178 case e1000_i211:
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007179 default:
7180 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007181 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007182 case e1000_82576:
7183 /* notify HW that the MAC is adding vlan tags */
7184 reg = rd32(E1000_DTXCTL);
7185 reg |= E1000_DTXCTL_VLAN_ADDED;
7186 wr32(E1000_DTXCTL, reg);
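		/* fall through */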
7187 case e1000_82580:
7188 /* enable replication vlan tag stripping */
7189 reg = rd32(E1000_RPLOLR);
7190 reg |= E1000_RPLOLR_STRVLAN;
7191 wr32(E1000_RPLOLR, reg);
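		/* fall through */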
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00007192 case e1000_i350:
7193 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007194 break;
7195 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00007196
Alexander Duyckd4960302009-10-27 15:53:45 +00007197 if (adapter->vfs_allocated_count) {
7198 igb_vmdq_set_loopback_pf(hw, true);
7199 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00007200 igb_vmdq_set_anti_spoofing_pf(hw, true,
7201 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00007202 } else {
7203 igb_vmdq_set_loopback_pf(hw, false);
7204 igb_vmdq_set_replication_pf(hw, false);
7205 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007206}
7207
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007208static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7209{
7210 struct e1000_hw *hw = &adapter->hw;
7211 u32 dmac_thr;
7212 u16 hwm;
7213
7214 if (hw->mac.type > e1000_82580) {
7215 if (adapter->flags & IGB_FLAG_DMAC) {
7216 u32 reg;
7217
7218 /* force threshold to 0. */
7219 wr32(E1000_DMCTXTH, 0);
7220
7221 /*
Matthew Vicke8c626e2011-11-17 08:33:12 +00007222 * DMA Coalescing high water mark needs to be greater
7223 * than the Rx threshold. Set hwm to PBA - max frame
7224 * size in 16B units, capping it at PBA - 6KB.
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007225 */
Matthew Vicke8c626e2011-11-17 08:33:12 +00007226 hwm = 64 * pba - adapter->max_frame_size / 16;
7227 if (hwm < 64 * (pba - 6))
7228 hwm = 64 * (pba - 6);
7229 reg = rd32(E1000_FCRTC);
7230 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
7231 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
7232 & E1000_FCRTC_RTH_COAL_MASK);
7233 wr32(E1000_FCRTC, reg);
7234
7235 /*
7236 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
7237 * frame size, capping it at PBA - 10KB.
7238 */
7239 dmac_thr = pba - adapter->max_frame_size / 512;
7240 if (dmac_thr < pba - 10)
7241 dmac_thr = pba - 10;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007242 reg = rd32(E1000_DMACR);
7243 reg &= ~E1000_DMACR_DMACTHR_MASK;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007244 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
7245 & E1000_DMACR_DMACTHR_MASK);
7246
 7247 /* transition to L0s or L1 if available */
7248 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
7249
 7250 /* watchdog timer = ~1000 usec, in units of 32 usec */
7251 reg |= (1000 >> 5);
Matthew Vick0c02dd92012-04-14 05:20:32 +00007252
7253 /* Disable BMC-to-OS Watchdog Enable */
7254 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007255 wr32(E1000_DMACR, reg);
7256
7257 /*
 7258 * no lower threshold to disable
 7259 * coalescing (smart FIFO) - UTRESH=0
7260 */
7261 wr32(E1000_DMCRTRH, 0);
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007262
7263 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
7264
7265 wr32(E1000_DMCTLX, reg);
7266
7267 /*
7268 * free space in tx packet buffer to wake from
7269 * DMA coal
7270 */
7271 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7272 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7273
7274 /*
7275 * make low power state decision controlled
7276 * by DMA coal
7277 */
7278 reg = rd32(E1000_PCIEMISC);
7279 reg &= ~E1000_PCIEMISC_LX_DECISION;
7280 wr32(E1000_PCIEMISC, reg);
7281 } /* endif adapter->dmac is not disabled */
7282 } else if (hw->mac.type == e1000_82580) {
7283 u32 reg = rd32(E1000_PCIEMISC);
7284 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
7285 wr32(E1000_DMACR, 0);
7286 }
7287}
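/*
 * Illustrative sketch, not driver code: the watermark arithmetic in
 * igb_init_dmac(), with pba in KB and hwm in 16-byte units. A 34 KB
 * packet buffer and a 1522-byte max frame are assumed example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pba = 34;			/* packet buffer size, KB */
	unsigned int max_frame = 1522;		/* bytes */
	unsigned int hwm, dmac_thr;

	hwm = 64 * pba - max_frame / 16;	/* PBA minus one frame, 16B units */
	if (hwm < 64 * (pba - 6))		/* never more than 6KB below PBA */
		hwm = 64 * (pba - 6);

	dmac_thr = pba - max_frame / 512;	/* PBA minus two frames, KB */
	if (dmac_thr < pba - 10)		/* never more than 10KB below PBA */
		dmac_thr = pba - 10;

	printf("hwm = %u x 16B, dmac_thr = %u KB\n", hwm, dmac_thr);
	return 0;
}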
7288
Auke Kok9d5c8242008-01-24 02:22:38 -08007289/* igb_main.c */