/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 4
#define BUILD 7
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
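/* With the values above, DRV_VERSION expands to "3.4.7-k". */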
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
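/* A negative debug value (the default) defers to DEFAULT_MSG_ENABLE;
 * presumably this is resolved through netif_msg_init() at probe time.
 */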

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;
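
	/* The whole dump is gated on adapter->msg_enable; the default
	 * DEFAULT_MSG_ENABLE does not include NETIF_MSG_HW, so this routine
	 * stays silent until msg_enable is raised (e.g. through ethtool's
	 * msglvl setting).
	 */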
	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
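/* Q_IDX_82576 interleaves ring indices across the queue pairs: 0->0, 1->8,
 * 2->1, 3->9, ... so that VF n ends up owning queues n and n + 8.
 */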
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
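		/* Fall through: any remaining (non-VF) queues take the
		 * sequential mapping below.
		 */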
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

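	/* When no NUMA node is pinned (adapter->node == -1), the loops below
	 * walk the online nodes round-robin so ring memory is spread across
	 * them.
	 */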
	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset within IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
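		/* e.g. rx_queue 2 and tx_queue 2 on one vector yield
		 * msixbm = (E1000_EICR_RX_QUEUE0 | E1000_EICR_TX_QUEUE0) << 2.
		 */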
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
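		/* e.g. rx_queue 9 maps to row 1 (9 & 0x7) at column offset 16
		 * ((9 & 0x8) << 1); the matching Tx entry uses offset + 8.
		 */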
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
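		/* e.g. rx_queue 9 maps to row 4 (9 >> 1) at column offset 16
		 * ((9 & 0x1) << 4); the matching Tx entry uses offset + 8.
		 */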
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
	if ((adapter->hw.mac.type == e1000_i210) ||
	    (adapter->hw.mac.type == e1000_i211))
		numvecs = 4;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
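	/* e.g. 4 Rx and 4 Tx rings without IGB_FLAG_QUEUE_PAIRS gives 8 queue
	 * vectors plus this link vector, so 9 MSI-X entries are requested.
	 */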
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

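	/* With enough vectors every ring gets its own q_vector; otherwise the
	 * Rx and Tx rings sharing an index are paired onto one vector.
	 */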
1202 if (adapter->num_q_vectors >=
1203 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1204 for (i = 0; i < adapter->num_rx_queues; i++)
1205 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1206 for (i = 0; i < adapter->num_tx_queues; i++)
1207 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1208 } else {
1209 for (i = 0; i < adapter->num_rx_queues; i++) {
1210 if (i < adapter->num_tx_queues)
1211 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1212 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1213 }
1214 for (; i < adapter->num_tx_queues; i++)
1215 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1216 }
1217 return 0;
1218}
1219
1220/**
1221 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1222 *
1223 * This function initializes the interrupts and allocates all of the queues.
1224 **/
1225static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1226{
1227 struct pci_dev *pdev = adapter->pdev;
1228 int err;
1229
Ben Hutchings21adef32010-09-27 08:28:39 +00001230 err = igb_set_interrupt_capability(adapter);
1231 if (err)
1232 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001233
1234 err = igb_alloc_q_vectors(adapter);
1235 if (err) {
1236 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1237 goto err_alloc_q_vectors;
1238 }
1239
1240 err = igb_alloc_queues(adapter);
1241 if (err) {
1242 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1243 goto err_alloc_queues;
1244 }
1245
1246 err = igb_map_ring_to_vector(adapter);
1247 if (err) {
1248 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1249 goto err_map_queues;
1250 }
1251
1253 return 0;
1254err_map_queues:
1255 igb_free_queues(adapter);
1256err_alloc_queues:
1257 igb_free_q_vectors(adapter);
1258err_alloc_q_vectors:
1259 igb_reset_interrupt_capability(adapter);
1260 return err;
1261}
1262
1263/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001264 * igb_request_irq - initialize interrupts
1265 * @adapter: board private structure
 *
1266 * Attempts to configure interrupts using the best available
1267 * capabilities of the hardware and kernel.
1268 **/
1269static int igb_request_irq(struct igb_adapter *adapter)
1270{
1271 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001272 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001273 int err = 0;
1274
1275 if (adapter->msix_entries) {
1276 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001277 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001278 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001279 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001280 igb_clear_interrupt_scheme(adapter);
Alexander Duyckc74d5882011-08-26 07:46:45 +00001281 if (!pci_enable_msi(pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001282 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001283 igb_free_all_tx_resources(adapter);
1284 igb_free_all_rx_resources(adapter);
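		/* rebuild for a single legacy/MSI vector: one Tx queue, one
		 * Rx queue, and a single q_vector to service them both */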
Alexander Duyck047e0032009-10-27 15:49:27 +00001285 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001286 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001287 adapter->num_q_vectors = 1;
1288 err = igb_alloc_q_vectors(adapter);
1289 if (err) {
1290 dev_err(&pdev->dev,
1291 "Unable to allocate memory for vectors\n");
1292 goto request_done;
1293 }
1294 err = igb_alloc_queues(adapter);
1295 if (err) {
1296 dev_err(&pdev->dev,
1297 "Unable to allocate memory for queues\n");
1298 igb_free_q_vectors(adapter);
1299 goto request_done;
1300 }
1301 igb_setup_all_tx_resources(adapter);
1302 igb_setup_all_rx_resources(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001303 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001304
Alexander Duyckc74d5882011-08-26 07:46:45 +00001305 igb_assign_vector(adapter->q_vector[0], 0);
1306
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001307 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001308 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001309 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001310 if (!err)
1311 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001312
Auke Kok9d5c8242008-01-24 02:22:38 -08001313 /* fall back to legacy interrupts */
1314 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001315 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001316 }
1317
Alexander Duyckc74d5882011-08-26 07:46:45 +00001318 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001319 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001320
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001321 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001322 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001323 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001324
1325request_done:
1326 return err;
1327}
1328
1329static void igb_free_irq(struct igb_adapter *adapter)
1330{
Auke Kok9d5c8242008-01-24 02:22:38 -08001331 if (adapter->msix_entries) {
1332 int vector = 0, i;
1333
Alexander Duyck047e0032009-10-27 15:49:27 +00001334 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001335
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001336 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001337 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001338 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001339 } else {
1340 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001341 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001342}
1343
1344/**
1345 * igb_irq_disable - Mask off interrupt generation on the NIC
1346 * @adapter: board private structure
1347 **/
1348static void igb_irq_disable(struct igb_adapter *adapter)
1349{
1350 struct e1000_hw *hw = &adapter->hw;
1351
Alexander Duyck25568a52009-10-27 23:49:59 +00001352 /*
1353	 * we need to be careful when disabling interrupts. The VFs are also
1354	 * mapped into these registers, and clearing the bits can cause
1355	 * issues for the VF drivers, so we only clear the bits we set
1356 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001357 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001358 u32 regval = rd32(E1000_EIAM);
1359 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1360 wr32(E1000_EIMC, adapter->eims_enable_mask);
1361 regval = rd32(E1000_EIAC);
1362 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001363 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001364
1365 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001366 wr32(E1000_IMC, ~0);
1367 wrfl();
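	/* wait for any interrupt handlers that are already running to
	 * finish before callers tear the rings down */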
Emil Tantilov81a61852010-08-02 14:40:52 +00001368 if (adapter->msix_entries) {
1369 int i;
1370 for (i = 0; i < adapter->num_q_vectors; i++)
1371 synchronize_irq(adapter->msix_entries[i].vector);
1372 } else {
1373 synchronize_irq(adapter->pdev->irq);
1374 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001375}
1376
1377/**
1378 * igb_irq_enable - Enable default interrupt generation settings
1379 * @adapter: board private structure
1380 **/
1381static void igb_irq_enable(struct igb_adapter *adapter)
1382{
1383 struct e1000_hw *hw = &adapter->hw;
1384
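	/* MSI-X: unmask the queue vectors via EIAC/EIAM/EIMS and enable only
	 * the non-queue causes (link, DMA out-of-sync, reset) in IMS;
	 * legacy/MSI: a single IMS/IAM mask covers everything */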
1385 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001386 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001387 u32 regval = rd32(E1000_EIAC);
1388 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1389 regval = rd32(E1000_EIAM);
1390 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001391 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001392 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001393 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001394 ims |= E1000_IMS_VMMB;
1395 }
1396 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001397 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001398 wr32(E1000_IMS, IMS_ENABLE_MASK |
1399 E1000_IMS_DRSTA);
1400 wr32(E1000_IAM, IMS_ENABLE_MASK |
1401 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001402 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001403}
1404
1405static void igb_update_mng_vlan(struct igb_adapter *adapter)
1406{
Alexander Duyck51466232009-10-27 23:47:35 +00001407 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001408 u16 vid = adapter->hw.mng_cookie.vlan_id;
1409 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001410
Alexander Duyck51466232009-10-27 23:47:35 +00001411 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1412 /* add VID to filter table */
1413 igb_vfta_set(hw, vid, true);
1414 adapter->mng_vlan_id = vid;
1415 } else {
1416 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1417 }
1418
1419 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1420 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001421 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001422 /* remove VID from filter table */
1423 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001424 }
1425}
1426
1427/**
1428 * igb_release_hw_control - release control of the h/w to f/w
1429 * @adapter: address of board private structure
1430 *
1431 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1432 * For ASF and Pass Through versions of f/w this means that the
1433 * driver is no longer loaded.
1434 *
1435 **/
1436static void igb_release_hw_control(struct igb_adapter *adapter)
1437{
1438 struct e1000_hw *hw = &adapter->hw;
1439 u32 ctrl_ext;
1440
1441 /* Let firmware take over control of h/w */
1442 ctrl_ext = rd32(E1000_CTRL_EXT);
1443 wr32(E1000_CTRL_EXT,
1444 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1445}
1446
Auke Kok9d5c8242008-01-24 02:22:38 -08001447/**
1448 * igb_get_hw_control - get control of the h/w from f/w
1449 * @adapter: address of board private structure
1450 *
1451 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1452 * For ASF and Pass Through versions of f/w this means that
1453 * the driver is loaded.
1454 *
1455 **/
1456static void igb_get_hw_control(struct igb_adapter *adapter)
1457{
1458 struct e1000_hw *hw = &adapter->hw;
1459 u32 ctrl_ext;
1460
1461 /* Let firmware know the driver has taken over */
1462 ctrl_ext = rd32(E1000_CTRL_EXT);
1463 wr32(E1000_CTRL_EXT,
1464 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1465}
1466
Auke Kok9d5c8242008-01-24 02:22:38 -08001467/**
1468 * igb_configure - configure the hardware for RX and TX
1469 * @adapter: private board structure
1470 **/
1471static void igb_configure(struct igb_adapter *adapter)
1472{
1473 struct net_device *netdev = adapter->netdev;
1474 int i;
1475
1476 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001477 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001478
1479 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001480
Alexander Duyck85b430b2009-10-27 15:50:29 +00001481 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001482 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001483 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001484
1485 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001486 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001487
1488 igb_rx_fifo_flush_82575(&adapter->hw);
1489
Alexander Duyckc493ea42009-03-20 00:16:50 +00001490 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001491 * at least 1 descriptor unused to make sure
1492 * next_to_use != next_to_clean */
1493 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001494 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001495 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001496 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001497}
1498
Nick Nunley88a268c2010-02-17 01:01:59 +00001499/**
1500 * igb_power_up_link - Power up the phy/serdes link
1501 * @adapter: address of board private structure
1502 **/
1503void igb_power_up_link(struct igb_adapter *adapter)
1504{
1505 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1506 igb_power_up_phy_copper(&adapter->hw);
1507 else
1508 igb_power_up_serdes_link_82575(&adapter->hw);
Koki Sanagia95a0742012-01-04 20:23:38 +00001509 igb_reset_phy(&adapter->hw);
Nick Nunley88a268c2010-02-17 01:01:59 +00001510}
1511
1512/**
1513 * igb_power_down_link - Power down the phy/serdes link
1514 * @adapter: address of board private structure
1515 */
1516static void igb_power_down_link(struct igb_adapter *adapter)
1517{
1518 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1519 igb_power_down_phy_copper_82575(&adapter->hw);
1520 else
1521 igb_shutdown_serdes_link_82575(&adapter->hw);
1522}
Auke Kok9d5c8242008-01-24 02:22:38 -08001523
1524/**
1525 * igb_up - Open the interface and prepare it to handle traffic
1526 * @adapter: board private structure
1527 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001528int igb_up(struct igb_adapter *adapter)
1529{
1530 struct e1000_hw *hw = &adapter->hw;
1531 int i;
1532
1533 /* hardware has been reset, we need to reload some things */
1534 igb_configure(adapter);
1535
1536 clear_bit(__IGB_DOWN, &adapter->state);
1537
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001538 for (i = 0; i < adapter->num_q_vectors; i++)
1539 napi_enable(&(adapter->q_vector[i]->napi));
1540
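	/* program the hardware interrupt routing: the full MSI-X table when
	 * available, otherwise everything onto vector 0 */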
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001541 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001542 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001543 else
1544 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001545
1546 /* Clear any pending interrupts. */
1547 rd32(E1000_ICR);
1548 igb_irq_enable(adapter);
1549
Alexander Duyckd4960302009-10-27 15:53:45 +00001550 /* notify VFs that reset has been completed */
1551 if (adapter->vfs_allocated_count) {
1552 u32 reg_data = rd32(E1000_CTRL_EXT);
1553 reg_data |= E1000_CTRL_EXT_PFRSTD;
1554 wr32(E1000_CTRL_EXT, reg_data);
1555 }
1556
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001557 netif_tx_start_all_queues(adapter->netdev);
1558
Alexander Duyck25568a52009-10-27 23:49:59 +00001559 /* start the watchdog. */
1560 hw->mac.get_link_status = 1;
1561 schedule_work(&adapter->watchdog_task);
1562
Auke Kok9d5c8242008-01-24 02:22:38 -08001563 return 0;
1564}
1565
1566void igb_down(struct igb_adapter *adapter)
1567{
Auke Kok9d5c8242008-01-24 02:22:38 -08001568 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001569 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001570 u32 tctl, rctl;
1571 int i;
1572
1573 /* signal that we're down so the interrupt handler does not
1574 * reschedule our watchdog timer */
1575 set_bit(__IGB_DOWN, &adapter->state);
1576
1577 /* disable receives in the hardware */
1578 rctl = rd32(E1000_RCTL);
1579 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1580 /* flush and sleep below */
1581
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001582 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001583
1584 /* disable transmits in the hardware */
1585 tctl = rd32(E1000_TCTL);
1586 tctl &= ~E1000_TCTL_EN;
1587 wr32(E1000_TCTL, tctl);
1588 /* flush both disables and wait for them to finish */
1589 wrfl();
1590 msleep(10);
1591
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001592 for (i = 0; i < adapter->num_q_vectors; i++)
1593 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001594
Auke Kok9d5c8242008-01-24 02:22:38 -08001595 igb_irq_disable(adapter);
1596
1597 del_timer_sync(&adapter->watchdog_timer);
1598 del_timer_sync(&adapter->phy_info_timer);
1599
Auke Kok9d5c8242008-01-24 02:22:38 -08001600 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001601
1602	/* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001603 spin_lock(&adapter->stats64_lock);
1604 igb_update_stats(adapter, &adapter->stats64);
1605 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001606
Auke Kok9d5c8242008-01-24 02:22:38 -08001607 adapter->link_speed = 0;
1608 adapter->link_duplex = 0;
1609
Jeff Kirsher30236822008-06-24 17:01:15 -07001610 if (!pci_channel_offline(adapter->pdev))
1611 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001612 igb_clean_all_tx_rings(adapter);
1613 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001614#ifdef CONFIG_IGB_DCA
1615
1616 /* since we reset the hardware DCA settings were cleared */
1617 igb_setup_dca(adapter);
1618#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001619}
1620
1621void igb_reinit_locked(struct igb_adapter *adapter)
1622{
1623 WARN_ON(in_interrupt());
1624 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1625 msleep(1);
1626 igb_down(adapter);
1627 igb_up(adapter);
1628 clear_bit(__IGB_RESETTING, &adapter->state);
1629}
1630
1631void igb_reset(struct igb_adapter *adapter)
1632{
Alexander Duyck090b1792009-10-27 23:51:55 +00001633 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001634 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001635 struct e1000_mac_info *mac = &hw->mac;
1636 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001637 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1638 u16 hwm;
1639
1640	/* Repartition PBA for jumbo (greater than 9k) MTU.
1641	 * CTRL.RST is required for the change to take effect.
1642 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001643 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001644 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001645 case e1000_82580:
1646 pba = rd32(E1000_RXPBS);
1647 pba = igb_rxpbs_adjust_82580(pba);
1648 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001649 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001650 pba = rd32(E1000_RXPBS);
1651 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001652 break;
1653 case e1000_82575:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001654 case e1000_i210:
1655 case e1000_i211:
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001656 default:
1657 pba = E1000_PBA_34K;
1658 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001659 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001660
Alexander Duyck2d064c02008-07-08 15:10:12 -07001661 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1662 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001663 /* adjust PBA for jumbo frames */
1664 wr32(E1000_PBA, pba);
1665
1666 /* To maintain wire speed transmits, the Tx FIFO should be
1667 * large enough to accommodate two full transmit packets,
1668 * rounded up to the next 1KB and expressed in KB. Likewise,
1669 * the Rx FIFO should be large enough to accommodate at least
1670 * one full receive packet and is similarly rounded up and
1671 * expressed in KB. */
1672 pba = rd32(E1000_PBA);
1673 /* upper 16 bits has Tx packet buffer allocation size in KB */
1674 tx_space = pba >> 16;
1675 /* lower 16 bits has Rx packet buffer allocation size in KB */
1676 pba &= 0xffff;
1677		/* the Tx FIFO also stores 16 bytes of information about each Tx
1678		 * packet, but we don't count the Ethernet FCS because hardware appends it */
1679 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001680 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001681 ETH_FCS_LEN) * 2;
1682 min_tx_space = ALIGN(min_tx_space, 1024);
1683 min_tx_space >>= 10;
1684 /* software strips receive CRC, so leave room for it */
1685 min_rx_space = adapter->max_frame_size;
1686 min_rx_space = ALIGN(min_rx_space, 1024);
1687 min_rx_space >>= 10;
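		/* worked example, assuming a 9018-byte jumbo max frame and the
		 * 16-byte advanced Tx descriptor:
		 *   min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB
		 *   min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB */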
1688
1689 /* If current Tx allocation is less than the min Tx FIFO size,
1690 * and the min Tx FIFO size is less than the current Rx FIFO
1691 * allocation, take space away from current Rx allocation */
1692 if (tx_space < min_tx_space &&
1693 ((min_tx_space - tx_space) < pba)) {
1694 pba = pba - (min_tx_space - tx_space);
1695
1696 /* if short on rx space, rx wins and must trump tx
1697 * adjustment */
1698 if (pba < min_rx_space)
1699 pba = min_rx_space;
1700 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001701 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001702 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001703
1704 /* flow control settings */
1705 /* The high water mark must be low enough to fit one full frame
1706 * (or the size used for early receive) above it in the Rx FIFO.
1707 * Set it to the lower of:
1708 * - 90% of the Rx FIFO size, or
1709	 * - the full Rx FIFO size minus two full frames */
1710 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001711 ((pba << 10) - 2 * adapter->max_frame_size));
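	/* e.g. with a 34 KB PBA and a 1522-byte max frame:
	 * hwm = min(34816 * 9 / 10, 34816 - 2 * 1522) = min(31334, 31772),
	 * so high_water = 31334 & 0xFFF0 = 31328 bytes */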
Auke Kok9d5c8242008-01-24 02:22:38 -08001712
Alexander Duyckd405ea32009-12-23 13:21:27 +00001713 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1714 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001715 fc->pause_time = 0xFFFF;
1716 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001717 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001718
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001719 /* disable receive for all VFs and wait one second */
1720 if (adapter->vfs_allocated_count) {
1721 int i;
1722 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001723 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001724
1725 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001726 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001727
1728 /* disable transmits and receives */
1729 wr32(E1000_VFRE, 0);
1730 wr32(E1000_VFTE, 0);
1731 }
1732
Auke Kok9d5c8242008-01-24 02:22:38 -08001733 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001734 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001735 wr32(E1000_WUC, 0);
1736
Alexander Duyck330a6d62009-10-27 23:51:35 +00001737 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001738 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001739
Matthew Vicka27416b2012-04-18 02:57:44 +00001740 /*
1741 * Flow control settings reset on hardware reset, so guarantee flow
1742 * control is off when forcing speed.
1743 */
1744 if (!hw->mac.autoneg)
1745 igb_force_mac_fc(hw);
1746
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001747 igb_init_dmac(adapter, pba);
Nick Nunley88a268c2010-02-17 01:01:59 +00001748 if (!netif_running(adapter->netdev))
1749 igb_power_down_link(adapter);
1750
Auke Kok9d5c8242008-01-24 02:22:38 -08001751 igb_update_mng_vlan(adapter);
1752
1753 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1754 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1755
Alexander Duyck330a6d62009-10-27 23:51:35 +00001756 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001757}
1758
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001759static netdev_features_t igb_fix_features(struct net_device *netdev,
1760 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001761{
1762 /*
1763	 * Since there is no support for separate Rx/Tx vlan accel
1764	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
1765 */
1766 if (features & NETIF_F_HW_VLAN_RX)
1767 features |= NETIF_F_HW_VLAN_TX;
1768 else
1769 features &= ~NETIF_F_HW_VLAN_TX;
1770
1771 return features;
1772}
1773
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001774static int igb_set_features(struct net_device *netdev,
1775 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001776{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001777 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001778 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001779
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001780 if (changed & NETIF_F_HW_VLAN_RX)
1781 igb_vlan_mode(netdev, features);
1782
Ben Greear89eaefb2012-03-06 09:41:58 +00001783 if (!(changed & NETIF_F_RXALL))
1784 return 0;
1785
1786 netdev->features = features;
1787
1788 if (netif_running(netdev))
1789 igb_reinit_locked(adapter);
1790 else
1791 igb_reset(adapter);
1792
Michał Mirosławac52caa2011-06-08 08:38:01 +00001793 return 0;
1794}
1795
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001796static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001797 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001798 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001799 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001800 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001801 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001802 .ndo_set_mac_address = igb_set_mac,
1803 .ndo_change_mtu = igb_change_mtu,
1804 .ndo_do_ioctl = igb_ioctl,
1805 .ndo_tx_timeout = igb_tx_timeout,
1806 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001807 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1808 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001809 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1810 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1811 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1812 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001813#ifdef CONFIG_NET_POLL_CONTROLLER
1814 .ndo_poll_controller = igb_netpoll,
1815#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001816 .ndo_fix_features = igb_fix_features,
1817 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001818};
1819
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001820/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001821 * igb_probe - Device Initialization Routine
1822 * @pdev: PCI device information struct
1823 * @ent: entry in igb_pci_tbl
1824 *
1825 * Returns 0 on success, negative on failure
1826 *
1827 * igb_probe initializes an adapter identified by a pci_dev structure.
1828 * The OS initialization, configuring of the adapter private structure,
1829 * and a hardware reset occur.
1830 **/
1831static int __devinit igb_probe(struct pci_dev *pdev,
1832 const struct pci_device_id *ent)
1833{
1834 struct net_device *netdev;
1835 struct igb_adapter *adapter;
1836 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001837 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001838 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001839 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001840 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1841 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001842 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001843 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001844 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001845
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001846 /* Catch broken hardware that put the wrong VF device ID in
1847 * the PCIe SR-IOV capability.
1848 */
1849 if (pdev->is_virtfn) {
1850 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00001851 pci_name(pdev), pdev->vendor, pdev->device);
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001852 return -EINVAL;
1853 }
1854
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001855 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001856 if (err)
1857 return err;
1858
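	/* prefer 64-bit DMA and only fall back to 32-bit masks when the
	 * device/platform combination cannot support it */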
1859 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001860 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001861 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001862 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001863 if (!err)
1864 pci_using_dac = 1;
1865 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001866 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001867 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001868 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001869 if (err) {
1870 dev_err(&pdev->dev, "No usable DMA "
1871 "configuration, aborting\n");
1872 goto err_dma;
1873 }
1874 }
1875 }
1876
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001877 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1878 IORESOURCE_MEM),
1879 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001880 if (err)
1881 goto err_pci_reg;
1882
Frans Pop19d5afd2009-10-02 10:04:12 -07001883 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001884
Auke Kok9d5c8242008-01-24 02:22:38 -08001885 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001886 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001887
1888 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001889 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001890 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001891 if (!netdev)
1892 goto err_alloc_etherdev;
1893
1894 SET_NETDEV_DEV(netdev, &pdev->dev);
1895
1896 pci_set_drvdata(pdev, netdev);
1897 adapter = netdev_priv(netdev);
1898 adapter->netdev = netdev;
1899 adapter->pdev = pdev;
1900 hw = &adapter->hw;
1901 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00001902 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9d5c8242008-01-24 02:22:38 -08001903
1904 mmio_start = pci_resource_start(pdev, 0);
1905 mmio_len = pci_resource_len(pdev, 0);
1906
1907 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001908 hw->hw_addr = ioremap(mmio_start, mmio_len);
1909 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001910 goto err_ioremap;
1911
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001912 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001913 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001914 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001915
1916 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1917
1918 netdev->mem_start = mmio_start;
1919 netdev->mem_end = mmio_start + mmio_len;
1920
Auke Kok9d5c8242008-01-24 02:22:38 -08001921 /* PCI config space info */
1922 hw->vendor_id = pdev->vendor;
1923 hw->device_id = pdev->device;
1924 hw->revision_id = pdev->revision;
1925 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1926 hw->subsystem_device_id = pdev->subsystem_device;
1927
Auke Kok9d5c8242008-01-24 02:22:38 -08001928 /* Copy the default MAC, PHY and NVM function pointers */
1929 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1930 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1931 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1932 /* Initialize skew-specific constants */
1933 err = ei->get_invariants(hw);
1934 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001935 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001936
Alexander Duyck450c87c2009-02-06 23:22:11 +00001937 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001938 err = igb_sw_init(adapter);
1939 if (err)
1940 goto err_sw_init;
1941
1942 igb_get_bus_info_pcie(hw);
1943
1944 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001945
1946 /* Copper options */
1947 if (hw->phy.media_type == e1000_media_type_copper) {
1948 hw->phy.mdix = AUTO_ALL_MODES;
1949 hw->phy.disable_polarity_correction = false;
1950 hw->phy.ms_type = e1000_ms_hw_default;
1951 }
1952
1953 if (igb_check_reset_block(hw))
1954 dev_info(&pdev->dev,
1955 "PHY reset is blocked due to SOL/IDER session.\n");
1956
Alexander Duyck077887c2011-08-26 07:46:29 +00001957 /*
1958	 * features is initialized to 0 in allocation; it might have bits
1959	 * set by igb_sw_init, so we should use an or instead of an
1960 * assignment.
1961 */
1962 netdev->features |= NETIF_F_SG |
1963 NETIF_F_IP_CSUM |
1964 NETIF_F_IPV6_CSUM |
1965 NETIF_F_TSO |
1966 NETIF_F_TSO6 |
1967 NETIF_F_RXHASH |
1968 NETIF_F_RXCSUM |
1969 NETIF_F_HW_VLAN_RX |
1970 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001971
Alexander Duyck077887c2011-08-26 07:46:29 +00001972 /* copy netdev features into list of user selectable features */
1973 netdev->hw_features |= netdev->features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001974 netdev->hw_features |= NETIF_F_RXALL;
Auke Kok9d5c8242008-01-24 02:22:38 -08001975
Alexander Duyck077887c2011-08-26 07:46:29 +00001976 /* set this bit last since it cannot be part of hw_features */
1977 netdev->features |= NETIF_F_HW_VLAN_FILTER;
1978
1979 netdev->vlan_features |= NETIF_F_TSO |
1980 NETIF_F_TSO6 |
1981 NETIF_F_IP_CSUM |
1982 NETIF_F_IPV6_CSUM |
1983 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001984
Ben Greear6b8f0922012-03-06 09:41:53 +00001985 netdev->priv_flags |= IFF_SUPP_NOFCS;
1986
Yi Zou7b872a52010-09-22 17:57:58 +00001987 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001988 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001989 netdev->vlan_features |= NETIF_F_HIGHDMA;
1990 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001991
Michał Mirosławac52caa2011-06-08 08:38:01 +00001992 if (hw->mac.type >= e1000_82576) {
1993 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001994 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001995 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001996
Jiri Pirko01789342011-08-16 06:29:00 +00001997 netdev->priv_flags |= IFF_UNICAST_FLT;
1998
Alexander Duyck330a6d62009-10-27 23:51:35 +00001999 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002000
2001 /* before reading the NVM, reset the controller to put the device in a
2002 * known good starting state */
2003 hw->mac.ops.reset_hw(hw);
2004
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002005 /*
2006	 * make sure the NVM is good; i211 parts have special NVM that
2007 * doesn't contain a checksum
2008 */
2009 if (hw->mac.type != e1000_i211) {
2010 if (hw->nvm.ops.validate(hw) < 0) {
2011 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2012 err = -EIO;
2013 goto err_eeprom;
2014 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002015 }
2016
2017 /* copy the MAC address out of the NVM */
2018 if (hw->mac.ops.read_mac_addr(hw))
2019 dev_err(&pdev->dev, "NVM Read Error\n");
2020
2021 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2022 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2023
2024 if (!is_valid_ether_addr(netdev->perm_addr)) {
2025 dev_err(&pdev->dev, "Invalid MAC Address\n");
2026 err = -EIO;
2027 goto err_eeprom;
2028 }
2029
Joe Perchesc061b182010-08-23 18:20:03 +00002030 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002031 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002032 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002033 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002034
2035 INIT_WORK(&adapter->reset_task, igb_reset_task);
2036 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2037
Alexander Duyck450c87c2009-02-06 23:22:11 +00002038 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002039 adapter->fc_autoneg = true;
2040 hw->mac.autoneg = true;
2041 hw->phy.autoneg_advertised = 0x2f;
2042
Alexander Duyck0cce1192009-07-23 18:10:24 +00002043 hw->fc.requested_mode = e1000_fc_default;
2044 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002045
Auke Kok9d5c8242008-01-24 02:22:38 -08002046 igb_validate_mdi_setting(hw);
2047
Auke Kok9d5c8242008-01-24 02:22:38 -08002048	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
2049 * enable the ACPI Magic Packet filter
2050 */
2051
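	/* which NVM word holds the APM enable bit depends on the PCI
	 * function and, on 82580 and later, a per-LAN-function offset */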
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002052 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002053 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002054 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002055 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2056 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2057 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002058 else if (hw->bus.func == 1)
2059 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002060
2061 if (eeprom_data & eeprom_apme_mask)
2062 adapter->eeprom_wol |= E1000_WUFC_MAG;
2063
2064 /* now that we have the eeprom settings, apply the special cases where
2065 * the eeprom may be wrong or the board simply won't support wake on
2066 * lan on a particular port */
2067 switch (pdev->device) {
2068 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2069 adapter->eeprom_wol = 0;
2070 break;
2071 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002072 case E1000_DEV_ID_82576_FIBER:
2073 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002074 /* Wake events only supported on port A for dual fiber
2075 * regardless of eeprom setting */
2076 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2077 adapter->eeprom_wol = 0;
2078 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002079 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002080 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002081 /* if quad port adapter, disable WoL on all but port A */
2082 if (global_quad_port_a != 0)
2083 adapter->eeprom_wol = 0;
2084 else
2085 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2086 /* Reset for multiple quad port adapters */
2087 if (++global_quad_port_a == 4)
2088 global_quad_port_a = 0;
2089 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002090 }
2091
2092 /* initialize the wol settings based on the eeprom settings */
2093 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002094 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002095
2096 /* reset the hardware with the new settings */
2097 igb_reset(adapter);
2098
2099 /* let the f/w know that the h/w is now under the control of the
2100 * driver. */
2101 igb_get_hw_control(adapter);
2102
Auke Kok9d5c8242008-01-24 02:22:38 -08002103 strcpy(netdev->name, "eth%d");
2104 err = register_netdev(netdev);
2105 if (err)
2106 goto err_register;
2107
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002108 /* carrier off reporting is important to ethtool even BEFORE open */
2109 netif_carrier_off(netdev);
2110
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002111#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002112 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002113 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002114 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002115 igb_setup_dca(adapter);
2116 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002117
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002118#endif
Richard Cochran7ebae812012-03-16 10:55:37 +00002119#ifdef CONFIG_IGB_PTP
Anders Berggren673b8b72011-02-04 07:32:32 +00002120 /* do hw tstamp init after resetting */
Richard Cochran7ebae812012-03-16 10:55:37 +00002121 igb_ptp_init(adapter);
Anders Berggren673b8b72011-02-04 07:32:32 +00002122
Richard Cochran7ebae812012-03-16 10:55:37 +00002123#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08002124 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2125 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002126 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002127 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002128 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002129 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002130 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002131 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2132 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2133 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2134 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002135 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002136
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002137 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2138 if (ret_val)
2139 strcpy(part_str, "Unknown");
2140 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002141 dev_info(&pdev->dev,
2142 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2143 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002144 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002145 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002146 switch (hw->mac.type) {
2147 case e1000_i350:
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002148 case e1000_i210:
2149 case e1000_i211:
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002150 igb_set_eee_i350(hw);
2151 break;
2152 default:
2153 break;
2154 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002155
2156 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002157 return 0;
2158
2159err_register:
2160 igb_release_hw_control(adapter);
2161err_eeprom:
2162 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002163 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002164
2165 if (hw->flash_address)
2166 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002167err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002168 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002169 iounmap(hw->hw_addr);
2170err_ioremap:
2171 free_netdev(netdev);
2172err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002173 pci_release_selected_regions(pdev,
2174 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002175err_pci_reg:
2176err_dma:
2177 pci_disable_device(pdev);
2178 return err;
2179}
2180
2181/**
2182 * igb_remove - Device Removal Routine
2183 * @pdev: PCI device information struct
2184 *
2185 * igb_remove is called by the PCI subsystem to alert the driver
2186 * that it should release a PCI device. The could be caused by a
2187 * Hot-Plug event, or because the driver is going to be removed from
2188 * memory.
2189 **/
2190static void __devexit igb_remove(struct pci_dev *pdev)
2191{
2192 struct net_device *netdev = pci_get_drvdata(pdev);
2193 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002194 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002195
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002196 pm_runtime_get_noresume(&pdev->dev);
Richard Cochran7ebae812012-03-16 10:55:37 +00002197#ifdef CONFIG_IGB_PTP
2198 igb_ptp_remove(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002199
Richard Cochran7ebae812012-03-16 10:55:37 +00002200#endif
Tejun Heo760141a2010-12-12 16:45:14 +01002201 /*
2202 * The watchdog timer may be rescheduled, so explicitly
2203	 * prevent the watchdog from being rescheduled.
2204 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002205 set_bit(__IGB_DOWN, &adapter->state);
2206 del_timer_sync(&adapter->watchdog_timer);
2207 del_timer_sync(&adapter->phy_info_timer);
2208
Tejun Heo760141a2010-12-12 16:45:14 +01002209 cancel_work_sync(&adapter->reset_task);
2210 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002211
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002212#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002213 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002214 dev_info(&pdev->dev, "DCA disabled\n");
2215 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002216 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002217 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002218 }
2219#endif
2220
Auke Kok9d5c8242008-01-24 02:22:38 -08002221 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2222 * would have already happened in close and is redundant. */
2223 igb_release_hw_control(adapter);
2224
2225 unregister_netdev(netdev);
2226
Alexander Duyck047e0032009-10-27 15:49:27 +00002227 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002228
Alexander Duyck37680112009-02-19 20:40:30 -08002229#ifdef CONFIG_PCI_IOV
2230 /* reclaim resources allocated to VFs */
2231 if (adapter->vf_data) {
2232 /* disable iov and allow time for transactions to clear */
Greg Rose0224d662011-10-14 02:57:14 +00002233 if (!igb_check_vf_assignment(adapter)) {
2234 pci_disable_sriov(pdev);
2235 msleep(500);
2236 } else {
2237 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2238 }
Alexander Duyck37680112009-02-19 20:40:30 -08002239
2240 kfree(adapter->vf_data);
2241 adapter->vf_data = NULL;
2242 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002243 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002244 msleep(100);
2245 dev_info(&pdev->dev, "IOV Disabled\n");
2246 }
2247#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002248
Alexander Duyck28b07592009-02-06 23:20:31 +00002249 iounmap(hw->hw_addr);
2250 if (hw->flash_address)
2251 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002252 pci_release_selected_regions(pdev,
2253 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002254
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002255 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002256 free_netdev(netdev);
2257
Frans Pop19d5afd2009-10-02 10:04:12 -07002258 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002259
Auke Kok9d5c8242008-01-24 02:22:38 -08002260 pci_disable_device(pdev);
2261}
2262
2263/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002264 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2265 * @adapter: board private structure to initialize
2266 *
2267 * This function initializes the vf specific data storage and then attempts to
2268 * allocate the VFs. The reason for this ordering is that it is much
2269 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2270 * the memory for the VFs.
2271 **/
2272static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2273{
2274#ifdef CONFIG_PCI_IOV
2275 struct pci_dev *pdev = adapter->pdev;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002276 struct e1000_hw *hw = &adapter->hw;
Greg Rose0224d662011-10-14 02:57:14 +00002277 int old_vfs = igb_find_enabled_vfs(adapter);
2278 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002279
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002280 /* Virtualization features not supported on i210 family. */
2281 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2282 return;
2283
Greg Rose0224d662011-10-14 02:57:14 +00002284 if (old_vfs) {
2285 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2286 "max_vfs setting of %d\n", old_vfs, max_vfs);
2287 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002288 }
2289
Greg Rose0224d662011-10-14 02:57:14 +00002290 if (!adapter->vfs_allocated_count)
2291 return;
2292
2293 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2294 sizeof(struct vf_data_storage), GFP_KERNEL);
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002295
Greg Rose0224d662011-10-14 02:57:14 +00002296 /* if allocation failed then we do not support SR-IOV */
2297 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002298 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002299 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2300 "Data Storage\n");
2301 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002302 }
Greg Rose0224d662011-10-14 02:57:14 +00002303
2304 if (!old_vfs) {
2305 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2306 goto err_out;
2307 }
2308 dev_info(&pdev->dev, "%d VFs allocated\n",
2309 adapter->vfs_allocated_count);
2310 for (i = 0; i < adapter->vfs_allocated_count; i++)
2311 igb_vf_configure(adapter, i);
2312
2313 /* DMA Coalescing is not supported in IOV mode. */
2314 adapter->flags &= ~IGB_FLAG_DMAC;
2315 goto out;
2316err_out:
2317 kfree(adapter->vf_data);
2318 adapter->vf_data = NULL;
2319 adapter->vfs_allocated_count = 0;
2320out:
2321 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002322#endif /* CONFIG_PCI_IOV */
2323}
2324
Alexander Duyck115f4592009-11-12 18:37:00 +00002325/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002326 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2327 * @adapter: board private structure to initialize
2328 *
2329 * igb_sw_init initializes the Adapter private data structure.
2330 * Fields are initialized based on PCI device information and
2331 * OS network device settings (MTU size).
2332 **/
2333static int __devinit igb_sw_init(struct igb_adapter *adapter)
2334{
2335 struct e1000_hw *hw = &adapter->hw;
2336 struct net_device *netdev = adapter->netdev;
2337 struct pci_dev *pdev = adapter->pdev;
2338
2339 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2340
Alexander Duyck13fde972011-10-05 13:35:24 +00002341 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002342 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2343 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002344
2345 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002346 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2347 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2348
Alexander Duyck13fde972011-10-05 13:35:24 +00002349 /* set default work limits */
2350 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2351
Alexander Duyck153285f2011-08-26 07:43:32 +00002352 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2353 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002354 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2355
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002356 adapter->node = -1;
2357
Eric Dumazet12dcd862010-10-15 17:27:10 +00002358 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002359#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002360 switch (hw->mac.type) {
2361 case e1000_82576:
2362 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002363 if (max_vfs > 7) {
2364 dev_warn(&pdev->dev,
2365 "Maximum of 7 VFs per PF, using max\n");
2366 adapter->vfs_allocated_count = 7;
2367 } else
2368 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002369 break;
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002370 case e1000_i210:
2371 case e1000_i211:
2372 adapter->vfs_allocated_count = 0;
2373 break;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002374 default:
2375 break;
2376 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002377#endif /* CONFIG_PCI_IOV */
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002378 switch (hw->mac.type) {
2379 case e1000_i210:
2380 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
2381 num_online_cpus());
2382 break;
2383 case e1000_i211:
2384 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
2385 num_online_cpus());
2386 break;
2387 default:
2388 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
2389 num_online_cpus());
2390 break;
2391 }
Williams, Mitch A665c8c82011-06-07 14:22:57 -07002392 /* i350 cannot do RSS and SR-IOV at the same time */
2393 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2394 adapter->rss_queues = 1;
Alexander Duycka99955f2009-11-12 18:37:19 +00002395
2396 /*
2397	 * if rss_queues > 4, or if VFs will be allocated while more than one
2398	 * RSS queue is in use, combine the Rx/Tx queues into queue pairs in
2399	 * order to conserve the limited supply of interrupt vectors
2400 */
2401 if ((adapter->rss_queues > 4) ||
2402 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2403 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2404
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002405 /* Setup and initialize a copy of the hw vlan table array */
2406 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2407 E1000_VLAN_FILTER_TBL_SIZE,
2408 GFP_ATOMIC);
2409
Alexander Duycka6b623e2009-10-27 23:47:53 +00002410 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002411 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002412 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2413 return -ENOMEM;
2414 }
2415
Alexander Duycka6b623e2009-10-27 23:47:53 +00002416 igb_probe_vfs(adapter);
2417
Auke Kok9d5c8242008-01-24 02:22:38 -08002418 /* Explicitly disable IRQ since the NIC can be in any state. */
2419 igb_irq_disable(adapter);
2420
Carolyn Wybornyf96a8a02012-04-06 23:25:19 +00002421 if (hw->mac.type >= e1000_i350)
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002422 adapter->flags &= ~IGB_FLAG_DMAC;
2423
Auke Kok9d5c8242008-01-24 02:22:38 -08002424 set_bit(__IGB_DOWN, &adapter->state);
2425 return 0;
2426}
2427
2428/**
2429 * igb_open - Called when a network interface is made active
2430 * @netdev: network interface device structure
2431 *
2432 * Returns 0 on success, negative value on failure
2433 *
2434 * The open entry point is called when a network interface is made
2435 * active by the system (IFF_UP). At this point all resources needed
2436 * for transmit and receive operations are allocated, the interrupt
2437 * handler is registered with the OS, the watchdog timer is started,
2438 * and the stack is notified that the interface is ready.
2439 **/
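/* __igb_open is shared by the ndo_open path and the runtime-resume path;
 * when resuming, the PM core already holds the device active, so we skip
 * taking another runtime-PM reference. */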
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002440static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08002441{
2442 struct igb_adapter *adapter = netdev_priv(netdev);
2443 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002444 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002445 int err;
2446 int i;
2447
2448 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002449 if (test_bit(__IGB_TESTING, &adapter->state)) {
2450 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08002451 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002452 }
2453
2454 if (!resuming)
2455 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002456
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002457 netif_carrier_off(netdev);
2458
Auke Kok9d5c8242008-01-24 02:22:38 -08002459 /* allocate transmit descriptors */
2460 err = igb_setup_all_tx_resources(adapter);
2461 if (err)
2462 goto err_setup_tx;
2463
2464 /* allocate receive descriptors */
2465 err = igb_setup_all_rx_resources(adapter);
2466 if (err)
2467 goto err_setup_rx;
2468
Nick Nunley88a268c2010-02-17 01:01:59 +00002469 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002470
Auke Kok9d5c8242008-01-24 02:22:38 -08002471 /* before we allocate an interrupt, we must be ready to handle it.
2472 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2473	 * as soon as we call request_irq, so we have to set up our
2474 * clean_rx handler before we do so. */
2475 igb_configure(adapter);
2476
2477 err = igb_request_irq(adapter);
2478 if (err)
2479 goto err_req_irq;
2480
2481 /* From here on the code is the same as igb_up() */
2482 clear_bit(__IGB_DOWN, &adapter->state);
2483
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002484 for (i = 0; i < adapter->num_q_vectors; i++)
2485 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002486
2487 /* Clear any pending interrupts. */
2488 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002489
2490 igb_irq_enable(adapter);
2491
Alexander Duyckd4960302009-10-27 15:53:45 +00002492 /* notify VFs that reset has been completed */
2493 if (adapter->vfs_allocated_count) {
2494 u32 reg_data = rd32(E1000_CTRL_EXT);
2495 reg_data |= E1000_CTRL_EXT_PFRSTD;
2496 wr32(E1000_CTRL_EXT, reg_data);
2497 }
2498
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002499 netif_tx_start_all_queues(netdev);
2500
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002501 if (!resuming)
2502 pm_runtime_put(&pdev->dev);
2503
Alexander Duyck25568a52009-10-27 23:49:59 +00002504 /* start the watchdog. */
2505 hw->mac.get_link_status = 1;
2506 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002507
2508 return 0;
2509
2510err_req_irq:
2511 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002512 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002513 igb_free_all_rx_resources(adapter);
2514err_setup_rx:
2515 igb_free_all_tx_resources(adapter);
2516err_setup_tx:
2517 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002518 if (!resuming)
2519 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002520
2521 return err;
2522}
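
/* Note on the "resuming" flag: a cold ndo_open grabs a runtime PM
 * reference (pm_runtime_get_sync) so the device stays powered while the
 * interface is brought up, and drops it again once the queues are running
 * or the error path is taken.  On the resume path the PM core already
 * holds the device active, so __igb_open(netdev, true) skips the get/put
 * pair; __igb_close() mirrors this with its "suspending" argument.
 */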

static int igb_open(struct net_device *netdev)
{
	return __igb_open(netdev, false);
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int __igb_close(struct net_device *netdev, bool suspending)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igb_down(adapter);
	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);
	return 0;
}

static int igb_close(struct net_device *netdev)
{
	return __igb_close(netdev, false);
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size;

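	/* Keep ring memory close to the CPU that services it: try a
	 * node-local allocation first (the ring's numa_node) and fall back
	 * to any node.  set_dev_node() below temporarily rebinds the device
	 * so that dma_alloc_coherent() also tries the preferred node for
	 * the descriptor area before falling back.
	 */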
	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, tx_ring->numa_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev,
						   tx_ring->size,
						   &tx_ring->dma,
						   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
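			/* unwind: free the rings allocated so far before
			 * reporting the failure */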
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

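	/* Descriptor thresholds, packed into TXDCTL at bit offsets 0, 8
	 * and 16: PTHRESH (prefetch threshold), HTHRESH (host threshold)
	 * and WTHRESH (write-back threshold) control when the hardware
	 * fetches more descriptors and how many completed descriptors it
	 * batches before writing them back.
	 */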
	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size, desc_len;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, rx_ring->numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev,
						   rx_ring->size,
						   &rx_ring->dma,
						   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			/* unwind: free the rings allocated so far */
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8 bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

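	/* Populate the 128-entry redirection table (RETA): the low bits of
	 * each packet's RSS hash index one of these bytes, and the byte
	 * selects the receive queue.  Entries are packed four per 32-bit
	 * RETA register, cycling j % num_rx_queues across the queues, with
	 * shift/shift2 positioning the queue index as the MAC expects.
	 */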
	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);
	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */

	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6 |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc |= E1000_MRQC_ENABLE_VMDQ;
	} else {
		if (hw->mac.type != e1000_i211)
			mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in igb_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
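	/* The ring runs with header split always on: SRRCTL carries two
	 * buffer sizes, a small header buffer (IGB_RX_HDR_LEN) for packet
	 * headers and a half-page packet buffer (no larger than
	 * IGB_RXBUFFER_16384) for the payload, so each descriptor posts
	 * both buffers.
	 */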
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

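/* A tx_buffer that owns an skb maps the linear part of the frame with
 * dma_unmap_single(); buffers for paged fragments carry only a page
 * mapping.  The branches below undo whichever mapping this buffer holds.
 */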
void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	} else if (tx_buffer->dma) {
		dma_unmap_page(ring->dev,
			       tx_buffer->dma,
			       tx_buffer->length,
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	tx_buffer->dma = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

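	/* each mta_list entry is a packed 6-byte (ETH_ALEN) MAC address;
	 * the shared code hashes every entry into the multicast table */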
	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

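/* WVBR appears to latch one "spoofed packet dropped" bit per VF queue,
 * with a VF's two queues staggered 8 bit positions apart, so VF j is
 * checked at bit j and at bit (j + IGB_STAGGERED_QUEUE_OFFSET) below.
 */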
static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				"Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350 copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
			       "Duplex, Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full" : "Half",
			       (ctrl & E1000_CTRL_TFCE) &&
			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
			       (ctrl & E1000_CTRL_RFCE) ? "RX" :
			       (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_LINK_THROTTLE)) {
				netdev_info(netdev, "The network adapter link "
					    "speed was downshifted because it "
					    "overheated\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was "
					   "stopped because it overheated\n");
			}

			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

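			/* no carrier: if the link does not come back within
			 * 5 seconds, let runtime PM suspend the device */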
			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;
3827
Alexander Duyck0ba82992011-08-26 07:45:47 +00003828 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3829 if (new_val < IGB_20K_ITR &&
3830 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3831 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3832 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003833
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003834set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003835 if (new_val != q_vector->itr_val) {
3836 q_vector->itr_val = new_val;
3837 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003838 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003839clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003840 q_vector->rx.total_bytes = 0;
3841 q_vector->rx.total_packets = 0;
3842 q_vector->tx.total_bytes = 0;
3843 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003844}
3845
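/*
 * Worked example for igb_update_ring_itr() (illustrative numbers): if
 * one interrupt period saw 10 Rx packets totalling 15000 bytes, then
 * avg_wire_size = 15000 / 10 + 24 = 1524.  That falls outside the
 * 300..1200 mid-size window, so new_val = 1524 / 2 = 762, a moderate
 * interval suited to near-MTU-sized bulk traffic.
 */
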
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				itrval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				itrval = bulk_latency;
			} else if ((packets > 35)) {
				itrval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

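/*
 * Worked example for igb_update_itr() (illustrative numbers): a ring in
 * low_latency that services 40 packets totalling 12000 bytes in one
 * interrupt has bytes > 10000 and packets > 35, so it steps down to
 * lowest_latency; four TSO frames totalling 36000 bytes give
 * bytes/packets = 9000 > 8000 and step the same ring up to bulk_latency.
 */
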
static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

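/*
 * Worked example for igb_tso() (illustrative numbers): a TCP/IPv4 TSO
 * frame with a 20-byte TCP header and gso_size = 1448 yields l4len = 20
 * and mss_l4len_idx = (20 << E1000_ADVTXD_L4LEN_SHIFT) |
 * (1448 << E1000_ADVTXD_MSS_SHIFT).  *hdr_len covers the Ethernet, IP
 * and TCP headers replicated in front of every segment, which is why
 * bytecount grows by (gso_segs - 1) * *hdr_len.
 */
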
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}

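/*
 * Example for igb_tx_csum() (illustrative): a CHECKSUM_PARTIAL UDP/IPv4
 * packet takes the IPPROTO_UDP branch above, so the context descriptor
 * carries TUCMD.IPV4 and an L4 length of sizeof(struct udphdr) = 8, and
 * IGB_TX_FLAGS_CSUM later tells igb_tx_olinfo_status() to request L4
 * checksum insertion on the data descriptors.
 */
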
static __le32 igb_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
				      E1000_ADVTXD_DCMD_IFCS |
				      E1000_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);

	/* set timestamp bit if present */
	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);

	/* set segmentation bits for TSO */
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring if any offload is enabled */
	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert IPv4 checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	}

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

/*
 * The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

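/*
 * Example (illustrative): IGB_MAX_DATA_PER_TXD = 1 << 15 = 32768 bytes,
 * so a single 60 KB (61440-byte) buffer is split by igb_tx_map() below
 * into two data descriptors of 32768 and 28672 bytes while keeping the
 * power-of-two alignment noted above.
 */
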
static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer_info;
	union e1000_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	__le32 cmd_type;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	first->length = size;
	first->dma = dma;
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.olinfo_status = 0;
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = size;
		tx_buffer_info->dma = dma;

		tx_desc->read.olinfo_status = 0;
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		frag++;
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
	if (unlikely(skb->no_fcs))
		cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

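/*
 * Example (illustrative): igb_xmit_frame_ring() below reserves
 * skb_shinfo(skb)->nr_frags + 4 descriptors up front, so an skb with
 * three page fragments asks for seven free descriptors: three for the
 * fragments, one for skb->data, one for a possible context descriptor,
 * and a two-descriptor gap to keep the tail from touching the head.
 */
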
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	igb_tx_map(tx_ring, first, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;

out_drop:
	igb_unmap_and_free_tx_resource(tx_ring, first);

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

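/*
 * Example (illustrative): with TCTL.PSP set the hardware pads short
 * frames, but 17 bytes is the minimum it will accept, so a 10-byte skb
 * handed to igb_xmit_frame() above is first padded to 17 bytes via
 * skb_padto() before being mapped to a Tx ring.
 */
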
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

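/*
 * Example (illustrative): for new_mtu = 9000, max_frame = 9000 +
 * ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 9022 bytes, which
 * is below MAX_STD_JUMBO_FRAME_SIZE (9238) and therefore accepted by
 * igb_change_mtu() above.
 */
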
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

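/*
 * Note (illustrative): the u64_stats_fetch_begin_bh()/
 * u64_stats_fetch_retry_bh() loops in igb_update_stats() reread a ring's
 * 64-bit byte and packet counters until a consistent snapshot is seen,
 * since a concurrent writer could otherwise tear the values on 32-bit
 * kernels; on 64-bit kernels the loop body runs exactly once.
 */
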
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also an indication of a spoof event
		 * in IOV mode.  Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

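/*
 * Note (illustrative): igb_write_itr() masks the stored interval with
 * 0x7FFC, so only bits 2..14 reach the EITR register; a pending value
 * that masks to zero is replaced with 0x4, the smallest non-zero
 * interval the mask can express.
 */
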
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pvfdev;
	unsigned int device_id;
	u16 thisvf_devfn;

	random_ether_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
			       (pdev->devfn & 1);
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
			       (pdev->devfn & 3);
		break;
	default:
		device_id = 0;
		thisvf_devfn = 0;
		break;
	}

	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == thisvf_devfn)
			break;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	if (pvfdev)
		adapter->vf_data[vf].vfdev = pvfdev;
	else
		dev_err(&pdev->dev,
			"Couldn't find pci dev ptr for VF %4.4x\n",
			thisvf_devfn);
	return pvfdev != NULL;
}

static int igb_find_enabled_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pvfdev;
	u16 vf_devfn = 0;
	u16 vf_stride;
	unsigned int device_id;
	int vfs_found = 0;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		vf_stride = 2;
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		vf_stride = 4;
		break;
	default:
		device_id = 0;
		vf_stride = 0;
		break;
	}

	vf_devfn = pdev->devfn + 0x80;
	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == vf_devfn &&
		    (pvfdev->bus->number >= pdev->bus->number))
			vfs_found++;
		vf_devfn += vf_stride;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	return vfs_found;
}

static int igb_check_vf_assignment(struct igb_adapter *adapter)
{
	int i;
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (adapter->vf_data[i].vfdev) {
			if (adapter->vf_data[i].vfdev->dev_flags &
			    PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}
	}
	return false;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

Alexander Duyck7d5753f2009-10-27 23:47:16 +00005025static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5026{
5027 struct e1000_hw *hw = &adapter->hw;
5028 u32 vmolr = rd32(E1000_VMOLR(vf));
5029 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5030
Alexander Duyckd85b90042010-09-22 17:56:20 +00005031 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005032 IGB_VF_FLAG_MULTI_PROMISC);
5033 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5034
5035 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5036 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00005037 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005038 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5039 } else {
5040 /*
5041 * if we have hashes and we are clearing a multicast promisc
5042 * flag we need to write the hashes to the MTA as this step
5043 * was previously skipped
5044 */
5045 if (vf_data->num_vf_mc_hashes > 30) {
5046 vmolr |= E1000_VMOLR_MPME;
5047 } else if (vf_data->num_vf_mc_hashes) {
5048 int j;
5049 vmolr |= E1000_VMOLR_ROMPE;
5050 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5051 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5052 }
5053 }
5054
5055 wr32(E1000_VMOLR(vf), vmolr);
5056
5057 /* there are flags left unprocessed, likely not supported */
5058 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5059 return -EINVAL;
5060
5061 return 0;
5062
5063}
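
/*
 * VMOLR bits used above, roughly as the 82576 datasheet describes
 * them: ROPE accepts unicast packets that hit the unicast hash table,
 * ROMPE accepts multicast packets that hit the MTA, and MPME is full
 * multicast promiscuous.  Only 30 multicast hashes are stored per VF
 * (the size of vf_mc_hashes[]), hence the fallback to MPME beyond
 * that.
 */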

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN id before clobbering the entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}
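
/*
 * The +4/-4 RLPML adjustment above accounts for the 802.1Q tag
 * (VLAN_HLEN): the per-pool max frame size is grown once when the
 * first VLAN is enabled for a VF and shrunk again when the last one
 * is removed, so a tagged full-size frame still passes.
 */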

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}
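
/*
 * This is the .ndo_set_vf_vlan hook; it is reached from userspace via
 * rtnetlink, e.g. "ip link set <pf> vf 0 vlan 100 qos 3".
 */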

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
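
/*
 * Layout of the 3-word reset reply built above: word 0 carries
 * E1000_VF_RESET | E1000_VT_MSGTYPE_ACK, and the VF's 6-byte MAC is
 * packed into words 1-2 for the VF driver to adopt.
 */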

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS status and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}
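
/*
 * Mailbox message format in brief: the low 16 bits of word 0 select
 * the request (E1000_VF_SET_*), the E1000_VT_MSGINFO bits carry a
 * small argument, and any payload (MAC address, hash list, VLAN id)
 * follows in the remaining words.  ACK/NACK/CTS ride back to the VF
 * in the same word 0.
 */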

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to our adapter private structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to our adapter private structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
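
/*
 * The "& 3" tests above pick out the adaptive ITR modes (itr_setting
 * values 1 and 3); larger settings are treated as a fixed interrupt
 * rate programmed once at init, so no per-interrupt ITR update is
 * needed for them.
 */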

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}
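
/*
 * NAPI contract in short: returning the full budget keeps this vector
 * in polled mode, while returning 0 after napi_complete() hands the
 * work back to interrupts via igb_ring_irq_enable() above.
 */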

#ifdef CONFIG_IGB_PTP
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_tx_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
			    struct igb_tx_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}

#endif
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	for (; budget; budget--) {
		eop_desc = tx_buffer->next_to_watch;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

#ifdef CONFIG_IGB_PTP
		/* retrieve hardware timestamp */
		igb_tx_hwtstamp(q_vector, tx_buffer);

#endif
		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);
		tx_buffer->skb = NULL;

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 tx_buffer->dma,
				 tx_buffer->length,
				 DMA_TO_DEVICE);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer->dma = 0;

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (tx_buffer->dma) {
				dma_unmap_page(tx_ring->dev,
					       tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			}
		}

		/* clear last DMA location */
		tx_buffer->dma = 0;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		eop_desc = tx_buffer->next_to_watch;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (eop_desc &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				eop_desc,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
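
/*
 * A note on the index arithmetic above: i is biased by -tx_ring->count
 * so the ring-wrap check is a cheap "if (!i)" rather than a compare
 * against the ring size; the bias is removed again with
 * "i += tx_ring->count" before next_to_clean is written back.
 */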

static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

#ifdef CONFIG_IGB_PTP
static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
				       E1000_RXDADV_STAT_TS))
		return;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

#endif
static void igb_rx_vlan(struct igb_ring *ring,
			union e1000_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;
		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, vid);
	}
}

static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IGB_RX_HDR_LEN)
		hlen = IGB_RX_HDR_LEN;
	return hlen;
}

static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	union e1000_adv_rx_desc *rx_desc;
	const int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	u16 i = rx_ring->next_to_clean;

	rx_desc = IGB_RX_DESC(rx_ring, i);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		struct sk_buff *skb = buffer_info->skb;
		union e1000_adv_rx_desc *next_rxd;

		buffer_info->skb = NULL;
		prefetch(skb->data);

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IGB_RX_DESC(rx_ring, i);
		prefetch(next_rxd);

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		if (!skb_is_nonlinear(skb)) {
			__skb_put(skb, igb_get_hlen(rx_desc));
			dma_unmap_single(rx_ring->dev, buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (rx_desc->wb.upper.length) {
			u16 length = le16_to_cpu(rx_desc->wb.upper.length);

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}

		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
			struct igb_rx_buffer *next_buffer;
			next_buffer = &rx_ring->rx_buffer_info[i];
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (unlikely((igb_test_staterr(rx_desc,
					       E1000_RXDEXT_ERR_FRAME_ERR_MASK))
			     && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

#ifdef CONFIG_IGB_PTP
		igb_rx_hwtstamp(q_vector, rx_desc, skb);
#endif
		igb_rx_hash(rx_ring, rx_desc, skb);
		igb_rx_checksum(rx_ring, rx_desc, skb);
		igb_rx_vlan(rx_ring, rx_desc, skb);

		total_bytes += skb->len;
		total_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		napi_gro_receive(&q_vector->napi, skb);

		budget--;
next_desc:
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return !!budget;
}

static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
				 struct igb_rx_buffer *bi)
{
	struct sk_buff *skb = bi->skb;
	dma_addr_t dma = bi->dma;

	if (dma)
		return true;

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IGB_RX_HDR_LEN);
		bi->skb = skb;
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}

		/* initialize skb for ring */
		skb_record_rx_queue(skb, rx_ring->queue_index);
	}

	dma = dma_map_single(rx_ring->dev, skb->data,
			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	return true;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t page_dma = bi->page_dma;
	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);

	if (page_dma)
		return true;

	if (!page) {
		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
		bi->page = page;
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}
	}

	page_dma = dma_map_page(rx_ring->dev, page,
				page_offset, PAGE_SIZE / 2,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, page_dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->page_dma = page_dma;
	bi->page_offset = page_offset;
	return true;
}
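
/*
 * Each page is used as two half-page receive buffers: the XOR with
 * PAGE_SIZE/2 above flips between the halves on every refill, and
 * igb_clean_rx_irq() only recycles a page while no one else holds a
 * reference (page_count() == 1) and it is local to the current NUMA
 * node.
 */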

/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to refill
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info. */
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;
	}

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure containing the MII data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
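
/*
 * These are the legacy MII ioctls used by tools such as mii-tool:
 * SIOCGMIIPHY returns the PHY address that a following SIOCGMIIREG
 * read should target.  PHY register writes (SIOCSMIIREG) are
 * deliberately rejected here.
 */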
6247
6248/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006249 * igb_hwtstamp_ioctl - control hardware time stamping
6250 * @netdev:
6251 * @ifreq:
6252 * @cmd:
6253 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006254 * Outgoing time stamping can be enabled and disabled. Play nice and
6255 * disable it when requested, although it shouldn't case any overhead
6256 * when no packet needs it. At most one packet in the queue may be
6257 * marked for time stamping, otherwise it would be impossible to tell
6258 * for sure to which packet the hardware time stamp belongs.
6259 *
6260 * Incoming time stamping has to be configured via the hardware
6261 * filters. Not all combinations are supported, in particular event
6262 * type has to be specified. Matching the kind of event packet is
6263 * not supported, with the exception of "all V2 events regardless of
6264 * level 2 or 4".
6265 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006266 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
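		/* fall through - TX_OFF only clears the enable bit */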
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
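
/*
 * Illustrative user-space sketch (not part of the driver) for the
 * SIOCSHWTSTAMP path handled above; "fd" is an open socket and "eth0" a
 * placeholder name, with error handling elided:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return the driver has copied back the filter it actually programmed,
 * which may be broader than the one requested (e.g. HWTSTAMP_FILTER_ALL).
 */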

/**
 * igb_ioctl - handle device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}
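
/*
 * Here "reg" is an offset into the PCIe capability structure located via
 * pdev->pcie_cap. An illustrative call (an assumption, not taken from this
 * file) reading the Device Control register:
 *
 *	u16 devctl;
 *	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl))
 *		... use devctl ...
 *
 * A nonzero return (-E1000_ERR_CONFIG) means no PCIe capability was found.
 */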

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

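	/*
	 * SPEED_10/100/1000 all have a clear low bit and DUPLEX_HALF/FULL
	 * are 0/1, so each sum below (10, 11, 100, 101, 1000, 1001) maps
	 * to exactly one speed/duplex pair.
	 */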
	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = __igb_shutdown(pdev, &wake, 0);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static int igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (!rtnl_is_locked()) {
		/*
		 * shut up ASSERT_RTNL() warning in
		 * netif_set_real_num_tx/rx_queues.
		 */
		rtnl_lock();
		err = igb_init_interrupt_scheme(adapter);
		rtnl_unlock();
	} else {
		err = igb_init_interrupt_scheme(adapter);
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netdev->flags & IFF_UP) {
		err = __igb_open(netdev, true);
		if (err)
			return err;
	}

	netif_device_attach(netdev);
	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

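	/*
	 * Returning nonzero tells the PM core not to runtime-suspend right
	 * away; when the link is down, the delayed suspend scheduled above
	 * will put the device to sleep about five seconds later.
	 */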
	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake, 1);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->msix_entries)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
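
	/*
	 * Worked example: for MAC 00:1b:21:aa:bb:cc the lines above yield
	 * rar_low = 0xaa211b00 and rar_high = 0x0000ccbb, before the valid
	 * and pool select bits are OR'ed in below.
	 */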

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address table
	 * and move towards the first entry, so a collision is not possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev,
		 "Reload the VF driver to make this change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF MAC address has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

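		/*
		 * rf_int.rf_dec is the fixed-point ratio link_speed/tx_rate.
		 * Worked example: link_speed = 1000 and tx_rate = 300 give
		 * rf_int = 3 and rf_dec = (100 << RF_INT_SHIFT) / 300, i.e.
		 * roughly 1/3, so the hardware paces the VF queue at about
		 * 1000 / 3.33 = 300 Mbps.
		 */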
		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}
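
/*
 * This handler backs the VF rate-limiting netlink operation; from user
 * space it is typically reached via iproute2, e.g. (illustrative):
 *
 *	ip link set eth0 vf 0 rate 300
 *
 * which arrives here as vf = 0, tx_rate = 300 (Mbps).
 */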

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
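		/* fall through - 82576 also wants the stripping setup below */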
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
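		/* fall through */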
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/*
			 * DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, with a floor of PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
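			/*
			 * Worked example: pba = 34 (KB) with a 1522 byte max
			 * frame gives hwm = 64 * 34 - 1522 / 16 = 2081 16B
			 * units; only jumbo frames above 6KB can pull hwm
			 * under the 64 * (pba - 6) floor applied above.
			 */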
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/*
			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, with a floor of PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = +-1000 usec in 32 usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/*
			 * no lower threshold to disable
			 * coalescing (smart FIFO) - UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/*
			 * free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/*
			 * make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}

/* igb_main.c */