/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 4
#define BUILD 7
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
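
/* With MAJ = 3, MIN = 4 and BUILD = 7 above, DRV_VERSION expands to the
 * version string "3.4.7-k".
 */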
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
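
/* A note on how this parameter is consumed (a sketch; the probe code that
 * does this is outside this excerpt): the standard helper from
 * <linux/netdevice.h> is used as
 *
 *	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 *
 * so the default debug = -1 keeps the DEFAULT_MSG_ENABLE bits above, while
 * a value of 0..16 enables the lowest `debug` NETIF_MSG_* bits instead.
 */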

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15      0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
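
/* Worked example of the Q_IDX_82576() mapping above: sequential indices are
 * spread across the two queue banks, matching the VF pairing described in
 * igb_cache_ring_register() below:
 *
 *	i              0  1  2  3  4  5  6  7
 *	Q_IDX_82576(i) 0  8  1  9  2 10  3 11
 */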
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through: any remaining queues use the default
		 * contiguous mapping below. */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
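
/* Usage sketch (illustrative values): for an 82576 ring with reg_idx 10,
 * the caller below passes index = (10 & 0x7) = 2 and offset =
 * ((10 & 0x8) << 1) = 16, so the vector number and valid bit land in
 * bits 23:16 of IVAR register 2.
 */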

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
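		/* For example (illustrative values), a vector serving rx
		   queue 1 and tx queue 1 ends up with msixbm =
		   (E1000_EICR_RX_QUEUE0 << 1) | (E1000_EICR_TX_QUEUE0 << 1),
		   i.e. one EICR bit per queue, written below. */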
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
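		/* Worked example: rx_queue 10 yields row index (10 & 0x7) = 2
		 * and column offset ((10 & 0x8) << 1) = 16; the tx cause for
		 * the same queue would sit another 8 bits higher.
		 */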
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
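		/* Worked example: rx_queue 5 yields row index (5 >> 1) = 2
		 * and column offset ((5 & 0x1) << 4) = 16; the paired tx
		 * cause goes 8 bits above that, at offset 24.
		 */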
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
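
	/* Illustrative count (assumed numbers): with rss_queues = 4, no VFs
	 * and queue pairing disabled, this requests 4 rx + 4 tx + 1 link = 9
	 * vectors; with IGB_FLAG_QUEUE_PAIRS set it is 4 + 1 = 5.
	 */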
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
1200/**
1201 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1202 *
1203 * This function initializes the interrupts and allocates all of the queues.
1204 **/
1205static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1206{
1207 struct pci_dev *pdev = adapter->pdev;
1208 int err;
1209
Ben Hutchings21adef32010-09-27 08:28:39 +00001210 err = igb_set_interrupt_capability(adapter);
1211 if (err)
1212 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001213
1214 err = igb_alloc_q_vectors(adapter);
1215 if (err) {
1216 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1217 goto err_alloc_q_vectors;
1218 }
1219
1220 err = igb_alloc_queues(adapter);
1221 if (err) {
1222 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1223 goto err_alloc_queues;
1224 }
1225
1226 err = igb_map_ring_to_vector(adapter);
1227 if (err) {
1228 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1229 goto err_map_queues;
1230 }
1231
1232
1233 return 0;
1234err_map_queues:
1235 igb_free_queues(adapter);
1236err_alloc_queues:
1237 igb_free_q_vectors(adapter);
1238err_alloc_q_vectors:
1239 igb_reset_interrupt_capability(adapter);
1240 return err;
1241}
1242
1243/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001244 * igb_request_irq - initialize interrupts
1245 *
1246 * Attempts to configure interrupts using the best available
1247 * capabilities of the hardware and kernel.
1248 **/
1249static int igb_request_irq(struct igb_adapter *adapter)
1250{
1251 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001252 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001253 int err = 0;
1254
1255 if (adapter->msix_entries) {
1256 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001257 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001258 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001259 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001260 igb_clear_interrupt_scheme(adapter);
Alexander Duyckc74d5882011-08-26 07:46:45 +00001261 if (!pci_enable_msi(pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001262 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001263 igb_free_all_tx_resources(adapter);
1264 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001265 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001266 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001267 adapter->num_q_vectors = 1;
1268 err = igb_alloc_q_vectors(adapter);
1269 if (err) {
1270 dev_err(&pdev->dev,
1271 "Unable to allocate memory for vectors\n");
1272 goto request_done;
1273 }
1274 err = igb_alloc_queues(adapter);
1275 if (err) {
1276 dev_err(&pdev->dev,
1277 "Unable to allocate memory for queues\n");
1278 igb_free_q_vectors(adapter);
1279 goto request_done;
1280 }
1281 igb_setup_all_tx_resources(adapter);
1282 igb_setup_all_rx_resources(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001283 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001284
Alexander Duyckc74d5882011-08-26 07:46:45 +00001285 igb_assign_vector(adapter->q_vector[0], 0);
1286
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001287 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001288 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001289 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001290 if (!err)
1291 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001292
Auke Kok9d5c8242008-01-24 02:22:38 -08001293 /* fall back to legacy interrupts */
1294 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001295 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001296 }
1297
Alexander Duyckc74d5882011-08-26 07:46:45 +00001298 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001299 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001300
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001301 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001302 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001303 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001304
1305request_done:
1306 return err;
1307}
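
/*
 * Summary of the fallback ladder above: MSI-X is tried first; if that
 * request fails, the interrupt scheme is rebuilt around a single MSI
 * vector (one Tx queue, one Rx queue, one q_vector), and if MSI cannot
 * be enabled either, a legacy shared INTx interrupt is requested.
 */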
1308
1309static void igb_free_irq(struct igb_adapter *adapter)
1310{
Auke Kok9d5c8242008-01-24 02:22:38 -08001311 if (adapter->msix_entries) {
1312 int vector = 0, i;
1313
Alexander Duyck047e0032009-10-27 15:49:27 +00001314 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001315
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001316 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001317 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001318 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001319 } else {
1320 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001321 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001322}
1323
1324/**
1325 * igb_irq_disable - Mask off interrupt generation on the NIC
1326 * @adapter: board private structure
1327 **/
1328static void igb_irq_disable(struct igb_adapter *adapter)
1329{
1330 struct e1000_hw *hw = &adapter->hw;
1331
Alexander Duyck25568a52009-10-27 23:49:59 +00001332 /*
1333 * We need to be careful when disabling interrupts: the VFs are also
1334 * mapped into these registers, and clearing the bits can disturb
1335 * the VF drivers, so we only clear what we set ourselves.
1336 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001337 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001338 u32 regval = rd32(E1000_EIAM);
1339 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1340 wr32(E1000_EIMC, adapter->eims_enable_mask);
1341 regval = rd32(E1000_EIAC);
1342 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001343 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001344
1345 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001346 wr32(E1000_IMC, ~0);
1347 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001348 if (adapter->msix_entries) {
1349 int i;
1350 for (i = 0; i < adapter->num_q_vectors; i++)
1351 synchronize_irq(adapter->msix_entries[i].vector);
1352 } else {
1353 synchronize_irq(adapter->pdev->irq);
1354 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001355}
1356
1357/**
1358 * igb_irq_enable - Enable default interrupt generation settings
1359 * @adapter: board private structure
1360 **/
1361static void igb_irq_enable(struct igb_adapter *adapter)
1362{
1363 struct e1000_hw *hw = &adapter->hw;
1364
1365 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001366 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001367 u32 regval = rd32(E1000_EIAC);
1368 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1369 regval = rd32(E1000_EIAM);
1370 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001371 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001372 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001373 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001374 ims |= E1000_IMS_VMMB;
1375 }
1376 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001377 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001378 wr32(E1000_IMS, IMS_ENABLE_MASK |
1379 E1000_IMS_DRSTA);
1380 wr32(E1000_IAM, IMS_ENABLE_MASK |
1381 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001382 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001383}
1384
1385static void igb_update_mng_vlan(struct igb_adapter *adapter)
1386{
Alexander Duyck51466232009-10-27 23:47:35 +00001387 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001388 u16 vid = adapter->hw.mng_cookie.vlan_id;
1389 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001390
Alexander Duyck51466232009-10-27 23:47:35 +00001391 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1392 /* add VID to filter table */
1393 igb_vfta_set(hw, vid, true);
1394 adapter->mng_vlan_id = vid;
1395 } else {
1396 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1397 }
1398
1399 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1400 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001401 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001402 /* remove VID from filter table */
1403 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001404 }
1405}
1406
1407/**
1408 * igb_release_hw_control - release control of the h/w to f/w
1409 * @adapter: address of board private structure
1410 *
1411 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1412 * For ASF and Pass Through versions of f/w this means that the
1413 * driver is no longer loaded.
1414 *
1415 **/
1416static void igb_release_hw_control(struct igb_adapter *adapter)
1417{
1418 struct e1000_hw *hw = &adapter->hw;
1419 u32 ctrl_ext;
1420
1421 /* Let firmware take over control of h/w */
1422 ctrl_ext = rd32(E1000_CTRL_EXT);
1423 wr32(E1000_CTRL_EXT,
1424 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1425}
1426
Auke Kok9d5c8242008-01-24 02:22:38 -08001427/**
1428 * igb_get_hw_control - get control of the h/w from f/w
1429 * @adapter: address of board private structure
1430 *
1431 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1432 * For ASF and Pass Through versions of f/w this means that
1433 * the driver is loaded.
1434 *
1435 **/
1436static void igb_get_hw_control(struct igb_adapter *adapter)
1437{
1438 struct e1000_hw *hw = &adapter->hw;
1439 u32 ctrl_ext;
1440
1441 /* Let firmware know the driver has taken over */
1442 ctrl_ext = rd32(E1000_CTRL_EXT);
1443 wr32(E1000_CTRL_EXT,
1444 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1445}
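
/*
 * These two helpers bracket driver ownership of the hardware: DRV_LOAD
 * is set when the driver takes over (see igb_configure() and the end of
 * igb_probe()) and cleared again on the probe error path and in
 * igb_remove(), so ASF/pass-through firmware always knows whether a
 * driver currently owns the device.
 */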
1446
Auke Kok9d5c8242008-01-24 02:22:38 -08001447/**
1448 * igb_configure - configure the hardware for RX and TX
1449 * @adapter: private board structure
1450 **/
1451static void igb_configure(struct igb_adapter *adapter)
1452{
1453 struct net_device *netdev = adapter->netdev;
1454 int i;
1455
1456 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001457 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001458
1459 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001460
Alexander Duyck85b430b2009-10-27 15:50:29 +00001461 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001462 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001463 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001464
1465 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001466 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001467
1468 igb_rx_fifo_flush_82575(&adapter->hw);
1469
Alexander Duyckc493ea42009-03-20 00:16:50 +00001470 /* call igb_desc_unused(), which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001471 * at least 1 descriptor unused, to make sure
1472 * next_to_use != next_to_clean */
1473 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001474 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001475 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001476 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001477}
1478
Nick Nunley88a268c2010-02-17 01:01:59 +00001479/**
1480 * igb_power_up_link - Power up the phy/serdes link
1481 * @adapter: address of board private structure
1482 **/
1483void igb_power_up_link(struct igb_adapter *adapter)
1484{
1485 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1486 igb_power_up_phy_copper(&adapter->hw);
1487 else
1488 igb_power_up_serdes_link_82575(&adapter->hw);
Koki Sanagia95a0742012-01-04 20:23:38 +00001489 igb_reset_phy(&adapter->hw);
Nick Nunley88a268c2010-02-17 01:01:59 +00001490}
1491
1492/**
1493 * igb_power_down_link - Power down the phy/serdes link
1494 * @adapter: address of board private structure
1495 **/
1496static void igb_power_down_link(struct igb_adapter *adapter)
1497{
1498 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1499 igb_power_down_phy_copper_82575(&adapter->hw);
1500 else
1501 igb_shutdown_serdes_link_82575(&adapter->hw);
1502}
Auke Kok9d5c8242008-01-24 02:22:38 -08001503
1504/**
1505 * igb_up - Open the interface and prepare it to handle traffic
1506 * @adapter: board private structure
1507 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001508int igb_up(struct igb_adapter *adapter)
1509{
1510 struct e1000_hw *hw = &adapter->hw;
1511 int i;
1512
1513 /* hardware has been reset, we need to reload some things */
1514 igb_configure(adapter);
1515
1516 clear_bit(__IGB_DOWN, &adapter->state);
1517
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001518 for (i = 0; i < adapter->num_q_vectors; i++)
1519 napi_enable(&(adapter->q_vector[i]->napi));
1520
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001521 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001522 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001523 else
1524 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001525
1526 /* Clear any pending interrupts. */
1527 rd32(E1000_ICR);
1528 igb_irq_enable(adapter);
1529
Alexander Duyckd4960302009-10-27 15:53:45 +00001530 /* notify VFs that reset has been completed */
1531 if (adapter->vfs_allocated_count) {
1532 u32 reg_data = rd32(E1000_CTRL_EXT);
1533 reg_data |= E1000_CTRL_EXT_PFRSTD;
1534 wr32(E1000_CTRL_EXT, reg_data);
1535 }
1536
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001537 netif_tx_start_all_queues(adapter->netdev);
1538
Alexander Duyck25568a52009-10-27 23:49:59 +00001539 /* start the watchdog. */
1540 hw->mac.get_link_status = 1;
1541 schedule_work(&adapter->watchdog_task);
1542
Auke Kok9d5c8242008-01-24 02:22:38 -08001543 return 0;
1544}
1545
1546void igb_down(struct igb_adapter *adapter)
1547{
Auke Kok9d5c8242008-01-24 02:22:38 -08001548 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001549 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001550 u32 tctl, rctl;
1551 int i;
1552
1553 /* signal that we're down so the interrupt handler does not
1554 * reschedule our watchdog timer */
1555 set_bit(__IGB_DOWN, &adapter->state);
1556
1557 /* disable receives in the hardware */
1558 rctl = rd32(E1000_RCTL);
1559 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1560 /* flush and sleep below */
1561
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001562 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001563
1564 /* disable transmits in the hardware */
1565 tctl = rd32(E1000_TCTL);
1566 tctl &= ~E1000_TCTL_EN;
1567 wr32(E1000_TCTL, tctl);
1568 /* flush both disables and wait for them to finish */
1569 wrfl();
1570 msleep(10);
1571
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001572 for (i = 0; i < adapter->num_q_vectors; i++)
1573 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001574
Auke Kok9d5c8242008-01-24 02:22:38 -08001575 igb_irq_disable(adapter);
1576
1577 del_timer_sync(&adapter->watchdog_timer);
1578 del_timer_sync(&adapter->phy_info_timer);
1579
Auke Kok9d5c8242008-01-24 02:22:38 -08001580 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001581
1582 /* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001583 spin_lock(&adapter->stats64_lock);
1584 igb_update_stats(adapter, &adapter->stats64);
1585 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001586
Auke Kok9d5c8242008-01-24 02:22:38 -08001587 adapter->link_speed = 0;
1588 adapter->link_duplex = 0;
1589
Jeff Kirsher30236822008-06-24 17:01:15 -07001590 if (!pci_channel_offline(adapter->pdev))
1591 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001592 igb_clean_all_tx_rings(adapter);
1593 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001594#ifdef CONFIG_IGB_DCA
1595
1596 /* since we reset the hardware DCA settings were cleared */
1597 igb_setup_dca(adapter);
1598#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001599}
1600
1601void igb_reinit_locked(struct igb_adapter *adapter)
1602{
1603 WARN_ON(in_interrupt());
1604 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1605 msleep(1);
1606 igb_down(adapter);
1607 igb_up(adapter);
1608 clear_bit(__IGB_RESETTING, &adapter->state);
1609}
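
/*
 * __IGB_RESETTING serializes full reinitialization: a concurrent caller
 * sleeps in 1 ms steps until the in-flight down/up cycle completes, so
 * only one reset runs at a time without holding a lock across
 * igb_down()/igb_up().
 */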
1610
1611void igb_reset(struct igb_adapter *adapter)
1612{
Alexander Duyck090b1792009-10-27 23:51:55 +00001613 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001614 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001615 struct e1000_mac_info *mac = &hw->mac;
1616 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001617 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1618 u16 hwm;
1619
1620 /* Repartition PBA for MTUs greater than 9k.
1621 * CTRL.RST is required for the change to take effect.
1622 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001623 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001624 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001625 case e1000_82580:
1626 pba = rd32(E1000_RXPBS);
1627 pba = igb_rxpbs_adjust_82580(pba);
1628 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001629 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001630 pba = rd32(E1000_RXPBS);
1631 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001632 break;
1633 case e1000_82575:
1634 default:
1635 pba = E1000_PBA_34K;
1636 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001637 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001638
Alexander Duyck2d064c02008-07-08 15:10:12 -07001639 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1640 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001641 /* adjust PBA for jumbo frames */
1642 wr32(E1000_PBA, pba);
1643
1644 /* To maintain wire speed transmits, the Tx FIFO should be
1645 * large enough to accommodate two full transmit packets,
1646 * rounded up to the next 1KB and expressed in KB. Likewise,
1647 * the Rx FIFO should be large enough to accommodate at least
1648 * one full receive packet and is similarly rounded up and
1649 * expressed in KB. */
1650 pba = rd32(E1000_PBA);
1651 /* upper 16 bits has Tx packet buffer allocation size in KB */
1652 tx_space = pba >> 16;
1653 /* lower 16 bits has Rx packet buffer allocation size in KB */
1654 pba &= 0xffff;
1655 /* the Tx FIFO also stores 16 bytes of information about the Tx packet,
1656 * but we don't include the Ethernet FCS because hardware appends it */
1657 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001658 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001659 ETH_FCS_LEN) * 2;
1660 min_tx_space = ALIGN(min_tx_space, 1024);
1661 min_tx_space >>= 10;
1662 /* software strips receive CRC, so leave room for it */
1663 min_rx_space = adapter->max_frame_size;
1664 min_rx_space = ALIGN(min_rx_space, 1024);
1665 min_rx_space >>= 10;
1666
1667 /* If current Tx allocation is less than the min Tx FIFO size,
1668 * and the min Tx FIFO size is less than the current Rx FIFO
1669 * allocation, take space away from current Rx allocation */
1670 if (tx_space < min_tx_space &&
1671 ((min_tx_space - tx_space) < pba)) {
1672 pba = pba - (min_tx_space - tx_space);
1673
1674 /* if short on rx space, rx wins and must trump tx
1675 * adjustment */
1676 if (pba < min_rx_space)
1677 pba = min_rx_space;
1678 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001679 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001680 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001681
1682 /* flow control settings */
1683 /* The high water mark must be low enough to fit one full frame
1684 * (or the size used for early receive) above it in the Rx FIFO.
1685 * Set it to the lower of:
1686 * - 90% of the Rx FIFO size, or
1687 * - the full Rx FIFO size minus one full frame */
1688 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001689 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001690
Alexander Duyckd405ea32009-12-23 13:21:27 +00001691 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1692 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001693 fc->pause_time = 0xFFFF;
1694 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001695 fc->current_mode = fc->requested_mode;
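
	/*
	 * Worked example (illustrative): assuming pba = 34 (KB) and a
	 * 1522 byte max frame, hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
	 * = min(31334, 31772) = 31334, so high_water = 31334 & 0xFFF0 =
	 * 31328 bytes and low_water = 31312 bytes.
	 */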
Auke Kok9d5c8242008-01-24 02:22:38 -08001696
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001697 /* disable receive for all VFs and wait one second */
1698 if (adapter->vfs_allocated_count) {
1699 int i;
1700 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001701 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001702
1703 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001704 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001705
1706 /* disable transmits and receives */
1707 wr32(E1000_VFRE, 0);
1708 wr32(E1000_VFTE, 0);
1709 }
1710
Auke Kok9d5c8242008-01-24 02:22:38 -08001711 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001712 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001713 wr32(E1000_WUC, 0);
1714
Alexander Duyck330a6d62009-10-27 23:51:35 +00001715 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001716 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001717
Matthew Vicka27416b2012-04-18 02:57:44 +00001718 /*
1719 * Flow control settings reset on hardware reset, so guarantee flow
1720 * control is off when forcing speed.
1721 */
1722 if (!hw->mac.autoneg)
1723 igb_force_mac_fc(hw);
1724
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001725 igb_init_dmac(adapter, pba);
Nick Nunley88a268c2010-02-17 01:01:59 +00001726 if (!netif_running(adapter->netdev))
1727 igb_power_down_link(adapter);
1728
Auke Kok9d5c8242008-01-24 02:22:38 -08001729 igb_update_mng_vlan(adapter);
1730
1731 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1732 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1733
Alexander Duyck330a6d62009-10-27 23:51:35 +00001734 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001735}
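
/*
 * Standalone sketch of the flow-control watermark math used in
 * igb_reset() (illustrative only; igb_example_hwm is hypothetical and
 * not part of the driver).
 */
static inline u32 igb_example_hwm(u32 pba_kb, u32 max_frame)
{
	/* lower of 90% of the Rx FIFO or the FIFO minus two full frames */
	u32 hwm = min(((pba_kb << 10) * 9 / 10),
		      ((pba_kb << 10) - 2 * max_frame));

	return hwm & 0xFFF0;	/* high water mark, 16-byte granularity */
}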
1736
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001737static netdev_features_t igb_fix_features(struct net_device *netdev,
1738 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001739{
1740 /*
1741 * Since there is no support for separate Rx/Tx VLAN accel
1742 * enable/disable, make sure the Tx flag is always in the same state as Rx.
1743 */
1744 if (features & NETIF_F_HW_VLAN_RX)
1745 features |= NETIF_F_HW_VLAN_TX;
1746 else
1747 features &= ~NETIF_F_HW_VLAN_TX;
1748
1749 return features;
1750}
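
/*
 * Example: a request such as "ethtool -K ethX rxvlan off" arrives here
 * with NETIF_F_HW_VLAN_RX cleared, and NETIF_F_HW_VLAN_TX is cleared
 * to match before the stack applies the new feature set.
 */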
1751
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001752static int igb_set_features(struct net_device *netdev,
1753 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001754{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001755 netdev_features_t changed = netdev->features ^ features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001756 struct igb_adapter *adapter = netdev_priv(netdev);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001757
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001758 if (changed & NETIF_F_HW_VLAN_RX)
1759 igb_vlan_mode(netdev, features);
1760
Ben Greear89eaefb2012-03-06 09:41:58 +00001761 if (!(changed & NETIF_F_RXALL))
1762 return 0;
1763
1764 netdev->features = features;
1765
1766 if (netif_running(netdev))
1767 igb_reinit_locked(adapter);
1768 else
1769 igb_reset(adapter);
1770
Michał Mirosławac52caa2011-06-08 08:38:01 +00001771 return 0;
1772}
1773
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001774static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001775 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001776 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001777 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001778 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001779 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001780 .ndo_set_mac_address = igb_set_mac,
1781 .ndo_change_mtu = igb_change_mtu,
1782 .ndo_do_ioctl = igb_ioctl,
1783 .ndo_tx_timeout = igb_tx_timeout,
1784 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001785 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1786 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001787 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1788 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1789 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1790 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001791#ifdef CONFIG_NET_POLL_CONTROLLER
1792 .ndo_poll_controller = igb_netpoll,
1793#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001794 .ndo_fix_features = igb_fix_features,
1795 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001796};
1797
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001798/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001799 * igb_probe - Device Initialization Routine
1800 * @pdev: PCI device information struct
1801 * @ent: entry in igb_pci_tbl
1802 *
1803 * Returns 0 on success, negative on failure
1804 *
1805 * igb_probe initializes an adapter identified by a pci_dev structure.
1806 * The OS initialization, configuring of the adapter private structure,
1807 * and a hardware reset occur.
1808 **/
1809static int __devinit igb_probe(struct pci_dev *pdev,
1810 const struct pci_device_id *ent)
1811{
1812 struct net_device *netdev;
1813 struct igb_adapter *adapter;
1814 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001815 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001816 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001817 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001818 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1819 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001820 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001821 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001822 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001823
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001824 /* Catch broken hardware that put the wrong VF device ID in
1825 * the PCIe SR-IOV capability.
1826 */
1827 if (pdev->is_virtfn) {
1828 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1829 pci_name(pdev), pdev->vendor, pdev->device);
1830 return -EINVAL;
1831 }
1832
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001833 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001834 if (err)
1835 return err;
1836
1837 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001838 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001839 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001840 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001841 if (!err)
1842 pci_using_dac = 1;
1843 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001844 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001845 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001846 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001847 if (err) {
1848 dev_err(&pdev->dev, "No usable DMA "
1849 "configuration, aborting\n");
1850 goto err_dma;
1851 }
1852 }
1853 }
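	/*
	 * (The ladder above prefers 64-bit DMA and only falls back to
	 * 32-bit addressing if the platform rejects it; pci_using_dac
	 * records the outcome so NETIF_F_HIGHDMA can be set later.)
	 */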
1854
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001855 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1856 IORESOURCE_MEM),
1857 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001858 if (err)
1859 goto err_pci_reg;
1860
Frans Pop19d5afd2009-10-02 10:04:12 -07001861 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001862
Auke Kok9d5c8242008-01-24 02:22:38 -08001863 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001864 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001865
1866 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001867 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001868 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001869 if (!netdev)
1870 goto err_alloc_etherdev;
1871
1872 SET_NETDEV_DEV(netdev, &pdev->dev);
1873
1874 pci_set_drvdata(pdev, netdev);
1875 adapter = netdev_priv(netdev);
1876 adapter->netdev = netdev;
1877 adapter->pdev = pdev;
1878 hw = &adapter->hw;
1879 hw->back = adapter;
stephen hemmingerb3f4d592012-03-13 06:04:20 +00001880 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Auke Kok9d5c8242008-01-24 02:22:38 -08001881
1882 mmio_start = pci_resource_start(pdev, 0);
1883 mmio_len = pci_resource_len(pdev, 0);
1884
1885 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001886 hw->hw_addr = ioremap(mmio_start, mmio_len);
1887 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001888 goto err_ioremap;
1889
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001890 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001891 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001892 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001893
1894 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1895
1896 netdev->mem_start = mmio_start;
1897 netdev->mem_end = mmio_start + mmio_len;
1898
Auke Kok9d5c8242008-01-24 02:22:38 -08001899 /* PCI config space info */
1900 hw->vendor_id = pdev->vendor;
1901 hw->device_id = pdev->device;
1902 hw->revision_id = pdev->revision;
1903 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1904 hw->subsystem_device_id = pdev->subsystem_device;
1905
Auke Kok9d5c8242008-01-24 02:22:38 -08001906 /* Copy the default MAC, PHY and NVM function pointers */
1907 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1908 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1909 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1910 /* Initialize skew-specific constants */
1911 err = ei->get_invariants(hw);
1912 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001913 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001914
Alexander Duyck450c87c2009-02-06 23:22:11 +00001915 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001916 err = igb_sw_init(adapter);
1917 if (err)
1918 goto err_sw_init;
1919
1920 igb_get_bus_info_pcie(hw);
1921
1922 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001923
1924 /* Copper options */
1925 if (hw->phy.media_type == e1000_media_type_copper) {
1926 hw->phy.mdix = AUTO_ALL_MODES;
1927 hw->phy.disable_polarity_correction = false;
1928 hw->phy.ms_type = e1000_ms_hw_default;
1929 }
1930
1931 if (igb_check_reset_block(hw))
1932 dev_info(&pdev->dev,
1933 "PHY reset is blocked due to SOL/IDER session.\n");
1934
Alexander Duyck077887c2011-08-26 07:46:29 +00001935 /*
1936 * features is initialized to 0 in allocation; it might have bits
1937 * set by igb_sw_init, so we should use an OR instead of an
1938 * assignment.
1939 */
1940 netdev->features |= NETIF_F_SG |
1941 NETIF_F_IP_CSUM |
1942 NETIF_F_IPV6_CSUM |
1943 NETIF_F_TSO |
1944 NETIF_F_TSO6 |
1945 NETIF_F_RXHASH |
1946 NETIF_F_RXCSUM |
1947 NETIF_F_HW_VLAN_RX |
1948 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001949
Alexander Duyck077887c2011-08-26 07:46:29 +00001950 /* copy netdev features into list of user selectable features */
1951 netdev->hw_features |= netdev->features;
Ben Greear89eaefb2012-03-06 09:41:58 +00001952 netdev->hw_features |= NETIF_F_RXALL;
Auke Kok9d5c8242008-01-24 02:22:38 -08001953
Alexander Duyck077887c2011-08-26 07:46:29 +00001954 /* set this bit last since it cannot be part of hw_features */
1955 netdev->features |= NETIF_F_HW_VLAN_FILTER;
1956
1957 netdev->vlan_features |= NETIF_F_TSO |
1958 NETIF_F_TSO6 |
1959 NETIF_F_IP_CSUM |
1960 NETIF_F_IPV6_CSUM |
1961 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001962
Ben Greear6b8f0922012-03-06 09:41:53 +00001963 netdev->priv_flags |= IFF_SUPP_NOFCS;
1964
Yi Zou7b872a52010-09-22 17:57:58 +00001965 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001966 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001967 netdev->vlan_features |= NETIF_F_HIGHDMA;
1968 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001969
Michał Mirosławac52caa2011-06-08 08:38:01 +00001970 if (hw->mac.type >= e1000_82576) {
1971 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001972 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001973 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001974
Jiri Pirko01789342011-08-16 06:29:00 +00001975 netdev->priv_flags |= IFF_UNICAST_FLT;
1976
Alexander Duyck330a6d62009-10-27 23:51:35 +00001977 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001978
1979 /* before reading the NVM, reset the controller to put the device in a
1980 * known good starting state */
1981 hw->mac.ops.reset_hw(hw);
1982
1983 /* make sure the NVM is good */
Carolyn Wyborny4322e562011-03-11 20:43:18 -08001984 if (hw->nvm.ops.validate(hw) < 0) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001985 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1986 err = -EIO;
1987 goto err_eeprom;
1988 }
1989
1990 /* copy the MAC address out of the NVM */
1991 if (hw->mac.ops.read_mac_addr(hw))
1992 dev_err(&pdev->dev, "NVM Read Error\n");
1993
1994 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1995 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1996
1997 if (!is_valid_ether_addr(netdev->perm_addr)) {
1998 dev_err(&pdev->dev, "Invalid MAC Address\n");
1999 err = -EIO;
2000 goto err_eeprom;
2001 }
2002
Joe Perchesc061b182010-08-23 18:20:03 +00002003 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002004 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002005 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002006 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002007
2008 INIT_WORK(&adapter->reset_task, igb_reset_task);
2009 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2010
Alexander Duyck450c87c2009-02-06 23:22:11 +00002011 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002012 adapter->fc_autoneg = true;
2013 hw->mac.autoneg = true;
2014 hw->phy.autoneg_advertised = 0x2f;
2015
Alexander Duyck0cce1192009-07-23 18:10:24 +00002016 hw->fc.requested_mode = e1000_fc_default;
2017 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002018
Auke Kok9d5c8242008-01-24 02:22:38 -08002019 igb_validate_mdi_setting(hw);
2020
Auke Kok9d5c8242008-01-24 02:22:38 -08002021 /* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
2022 * enable the ACPI Magic Packet filter
2023 */
2024
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002025 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002026 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002027 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002028 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2029 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2030 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002031 else if (hw->bus.func == 1)
2032 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002033
2034 if (eeprom_data & eeprom_apme_mask)
2035 adapter->eeprom_wol |= E1000_WUFC_MAG;
2036
2037 /* now that we have the eeprom settings, apply the special cases where
2038 * the eeprom may be wrong or the board simply won't support wake on
2039 * lan on a particular port */
2040 switch (pdev->device) {
2041 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2042 adapter->eeprom_wol = 0;
2043 break;
2044 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002045 case E1000_DEV_ID_82576_FIBER:
2046 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002047 /* Wake events are only supported on port A for dual-fiber
2048 * adapters, regardless of the EEPROM setting */
2049 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2050 adapter->eeprom_wol = 0;
2051 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002052 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002053 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002054 /* if quad port adapter, disable WoL on all but port A */
2055 if (global_quad_port_a != 0)
2056 adapter->eeprom_wol = 0;
2057 else
2058 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2059 /* Reset for multiple quad port adapters */
2060 if (++global_quad_port_a == 4)
2061 global_quad_port_a = 0;
2062 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002063 }
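	/*
	 * Example: on an 82576 quad-copper adapter only the first function
	 * probed keeps its EEPROM WoL setting (and IGB_FLAG_QUAD_PORT_A);
	 * eeprom_wol is forced to 0 on the remaining three ports.
	 */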
2064
2065 /* initialize the wol settings based on the eeprom settings */
2066 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002067 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002068
2069 /* reset the hardware with the new settings */
2070 igb_reset(adapter);
2071
2072 /* let the f/w know that the h/w is now under the control of the
2073 * driver. */
2074 igb_get_hw_control(adapter);
2075
Auke Kok9d5c8242008-01-24 02:22:38 -08002076 strcpy(netdev->name, "eth%d");
2077 err = register_netdev(netdev);
2078 if (err)
2079 goto err_register;
2080
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002081 /* carrier off reporting is important to ethtool even BEFORE open */
2082 netif_carrier_off(netdev);
2083
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002084#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002085 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002086 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002087 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002088 igb_setup_dca(adapter);
2089 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002090
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002091#endif
Richard Cochran7ebae812012-03-16 10:55:37 +00002092#ifdef CONFIG_IGB_PTP
Anders Berggren673b8b72011-02-04 07:32:32 +00002093 /* do hw tstamp init after resetting */
Richard Cochran7ebae812012-03-16 10:55:37 +00002094 igb_ptp_init(adapter);
Anders Berggren673b8b72011-02-04 07:32:32 +00002095
Richard Cochran7ebae812012-03-16 10:55:37 +00002096#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08002097 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2098 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002099 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002100 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002101 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002102 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002103 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002104 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2105 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2106 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2107 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002108 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002109
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002110 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2111 if (ret_val)
2112 strcpy(part_str, "Unknown");
2113 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002114 dev_info(&pdev->dev,
2115 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2116 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002117 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002118 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002119 switch (hw->mac.type) {
2120 case e1000_i350:
2121 igb_set_eee_i350(hw);
2122 break;
2123 default:
2124 break;
2125 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002126
2127 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002128 return 0;
2129
2130err_register:
2131 igb_release_hw_control(adapter);
2132err_eeprom:
2133 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002134 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002135
2136 if (hw->flash_address)
2137 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002138err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002139 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002140 iounmap(hw->hw_addr);
2141err_ioremap:
2142 free_netdev(netdev);
2143err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002144 pci_release_selected_regions(pdev,
2145 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002146err_pci_reg:
2147err_dma:
2148 pci_disable_device(pdev);
2149 return err;
2150}
2151
2152/**
2153 * igb_remove - Device Removal Routine
2154 * @pdev: PCI device information struct
2155 *
2156 * igb_remove is called by the PCI subsystem to alert the driver
2157 * that it should release a PCI device. This could be caused by a
2158 * Hot-Plug event, or because the driver is going to be removed from
2159 * memory.
2160 **/
2161static void __devexit igb_remove(struct pci_dev *pdev)
2162{
2163 struct net_device *netdev = pci_get_drvdata(pdev);
2164 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002165 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002166
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002167 pm_runtime_get_noresume(&pdev->dev);
Richard Cochran7ebae812012-03-16 10:55:37 +00002168#ifdef CONFIG_IGB_PTP
2169 igb_ptp_remove(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002170
Richard Cochran7ebae812012-03-16 10:55:37 +00002171#endif
Tejun Heo760141a2010-12-12 16:45:14 +01002172 /*
2173 * The watchdog timer may be rescheduled, so explicitly
2174 * disable watchdog from being rescheduled.
2175 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002176 set_bit(__IGB_DOWN, &adapter->state);
2177 del_timer_sync(&adapter->watchdog_timer);
2178 del_timer_sync(&adapter->phy_info_timer);
2179
Tejun Heo760141a2010-12-12 16:45:14 +01002180 cancel_work_sync(&adapter->reset_task);
2181 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002182
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002183#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002184 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002185 dev_info(&pdev->dev, "DCA disabled\n");
2186 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002187 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002188 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002189 }
2190#endif
2191
Auke Kok9d5c8242008-01-24 02:22:38 -08002192 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2193 * would have already happened in close and is redundant. */
2194 igb_release_hw_control(adapter);
2195
2196 unregister_netdev(netdev);
2197
Alexander Duyck047e0032009-10-27 15:49:27 +00002198 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002199
Alexander Duyck37680112009-02-19 20:40:30 -08002200#ifdef CONFIG_PCI_IOV
2201 /* reclaim resources allocated to VFs */
2202 if (adapter->vf_data) {
2203 /* disable iov and allow time for transactions to clear */
Greg Rose0224d662011-10-14 02:57:14 +00002204 if (!igb_check_vf_assignment(adapter)) {
2205 pci_disable_sriov(pdev);
2206 msleep(500);
2207 } else {
2208 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2209 }
Alexander Duyck37680112009-02-19 20:40:30 -08002210
2211 kfree(adapter->vf_data);
2212 adapter->vf_data = NULL;
2213 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002214 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002215 msleep(100);
2216 dev_info(&pdev->dev, "IOV Disabled\n");
2217 }
2218#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002219
Alexander Duyck28b07592009-02-06 23:20:31 +00002220 iounmap(hw->hw_addr);
2221 if (hw->flash_address)
2222 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002223 pci_release_selected_regions(pdev,
2224 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002225
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002226 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002227 free_netdev(netdev);
2228
Frans Pop19d5afd2009-10-02 10:04:12 -07002229 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002230
Auke Kok9d5c8242008-01-24 02:22:38 -08002231 pci_disable_device(pdev);
2232}
2233
2234/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002235 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2236 * @adapter: board private structure to initialize
2237 *
2238 * This function initializes the VF-specific data storage and then attempts to
2239 * allocate the VFs. The reason for ordering it this way is that it is much
2240 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2241 * the memory for the VFs.
2242 **/
2243static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2244{
2245#ifdef CONFIG_PCI_IOV
2246 struct pci_dev *pdev = adapter->pdev;
Greg Rose0224d662011-10-14 02:57:14 +00002247 int old_vfs = igb_find_enabled_vfs(adapter);
2248 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002249
Greg Rose0224d662011-10-14 02:57:14 +00002250 if (old_vfs) {
2251 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2252 "max_vfs setting of %d\n", old_vfs, max_vfs);
2253 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002254 }
2255
Greg Rose0224d662011-10-14 02:57:14 +00002256 if (!adapter->vfs_allocated_count)
2257 return;
2258
2259 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2260 sizeof(struct vf_data_storage), GFP_KERNEL);
2261 /* if allocation failed then we do not support SR-IOV */
2262 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002263 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002264 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2265 "Data Storage\n");
2266 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002267 }
Greg Rose0224d662011-10-14 02:57:14 +00002268
2269 if (!old_vfs) {
2270 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2271 goto err_out;
2272 }
2273 dev_info(&pdev->dev, "%d VFs allocated\n",
2274 adapter->vfs_allocated_count);
2275 for (i = 0; i < adapter->vfs_allocated_count; i++)
2276 igb_vf_configure(adapter, i);
2277
2278 /* DMA Coalescing is not supported in IOV mode. */
2279 adapter->flags &= ~IGB_FLAG_DMAC;
2280 goto out;
2281err_out:
2282 kfree(adapter->vf_data);
2283 adapter->vf_data = NULL;
2284 adapter->vfs_allocated_count = 0;
2285out:
2286 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002287#endif /* CONFIG_PCI_IOV */
2288}
2289
Alexander Duyck115f4592009-11-12 18:37:00 +00002290/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002291 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2292 * @adapter: board private structure to initialize
2293 *
2294 * igb_sw_init initializes the Adapter private data structure.
2295 * Fields are initialized based on PCI device information and
2296 * OS network device settings (MTU size).
2297 **/
2298static int __devinit igb_sw_init(struct igb_adapter *adapter)
2299{
2300 struct e1000_hw *hw = &adapter->hw;
2301 struct net_device *netdev = adapter->netdev;
2302 struct pci_dev *pdev = adapter->pdev;
2303
2304 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2305
Alexander Duyck13fde972011-10-05 13:35:24 +00002306 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002307 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2308 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002309
2310 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002311 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2312 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2313
Alexander Duyck13fde972011-10-05 13:35:24 +00002314 /* set default work limits */
2315 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2316
Alexander Duyck153285f2011-08-26 07:43:32 +00002317 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2318 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002319 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2320
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002321 adapter->node = -1;
2322
Eric Dumazet12dcd862010-10-15 17:27:10 +00002323 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002324#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002325 switch (hw->mac.type) {
2326 case e1000_82576:
2327 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002328 if (max_vfs > 7) {
2329 dev_warn(&pdev->dev,
2330 "Maximum of 7 VFs per PF, using max\n");
2331 adapter->vfs_allocated_count = 7;
2332 } else
2333 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002334 break;
2335 default:
2336 break;
2337 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002338#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00002339 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
Williams, Mitch A665c8c82011-06-07 14:22:57 -07002340 /* i350 cannot do RSS and SR-IOV at the same time */
2341 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2342 adapter->rss_queues = 1;
Alexander Duycka99955f2009-11-12 18:37:19 +00002343
2344 /*
2345 * if rss_queues > 4, or if more than 6 VFs are allocated while
2346 * rss_queues > 1, we should combine the queues into queue pairs
2347 * in order to conserve the limited supply of interrupt vectors
2348 */
2349 if ((adapter->rss_queues > 4) ||
2350 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2351 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
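	/*
	 * Example (illustrative): with 8 online CPUs rss_queues can reach 8,
	 * so queue pairing lets 8 Tx and 8 Rx rings share 8 q_vectors
	 * instead of consuming 16 MSI-X vectors.
	 */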
2352
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002353 /* Set up a shadow copy of the hw VLAN filter table array */
2354 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2355 E1000_VLAN_FILTER_TBL_SIZE,
2356 GFP_ATOMIC);
2357
Alexander Duycka6b623e2009-10-27 23:47:53 +00002358 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002359 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002360 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2361 return -ENOMEM;
2362 }
2363
Alexander Duycka6b623e2009-10-27 23:47:53 +00002364 igb_probe_vfs(adapter);
2365
Auke Kok9d5c8242008-01-24 02:22:38 -08002366 /* Explicitly disable IRQ since the NIC can be in any state. */
2367 igb_irq_disable(adapter);
2368
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002369 if (hw->mac.type == e1000_i350)
2370 adapter->flags &= ~IGB_FLAG_DMAC;
2371
Auke Kok9d5c8242008-01-24 02:22:38 -08002372 set_bit(__IGB_DOWN, &adapter->state);
2373 return 0;
2374}
2375
2376/**
2377 * igb_open - Called when a network interface is made active
2378 * @netdev: network interface device structure
2379 *
2380 * Returns 0 on success, negative value on failure
2381 *
2382 * The open entry point is called when a network interface is made
2383 * active by the system (IFF_UP). At this point all resources needed
2384 * for transmit and receive operations are allocated, the interrupt
2385 * handler is registered with the OS, the watchdog timer is started,
2386 * and the stack is notified that the interface is ready.
2387 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002388static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08002389{
2390 struct igb_adapter *adapter = netdev_priv(netdev);
2391 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002392 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002393 int err;
2394 int i;
2395
2396 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002397 if (test_bit(__IGB_TESTING, &adapter->state)) {
2398 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08002399 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002400 }
2401
2402 if (!resuming)
2403 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002404
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002405 netif_carrier_off(netdev);
2406
Auke Kok9d5c8242008-01-24 02:22:38 -08002407 /* allocate transmit descriptors */
2408 err = igb_setup_all_tx_resources(adapter);
2409 if (err)
2410 goto err_setup_tx;
2411
2412 /* allocate receive descriptors */
2413 err = igb_setup_all_rx_resources(adapter);
2414 if (err)
2415 goto err_setup_rx;
2416
Nick Nunley88a268c2010-02-17 01:01:59 +00002417 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002418
Auke Kok9d5c8242008-01-24 02:22:38 -08002419 /* before we allocate an interrupt, we must be ready to handle it.
2420 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2421 * as soon as we call pci_request_irq, so we have to set up our
2422 * clean_rx handler before we do so. */
2423 igb_configure(adapter);
2424
2425 err = igb_request_irq(adapter);
2426 if (err)
2427 goto err_req_irq;
2428
2429 /* From here on the code is the same as igb_up() */
2430 clear_bit(__IGB_DOWN, &adapter->state);
2431
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002432 for (i = 0; i < adapter->num_q_vectors; i++)
2433 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002434
2435 /* Clear any pending interrupts. */
2436 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002437
2438 igb_irq_enable(adapter);
2439
Alexander Duyckd4960302009-10-27 15:53:45 +00002440 /* notify VFs that reset has been completed */
2441 if (adapter->vfs_allocated_count) {
2442 u32 reg_data = rd32(E1000_CTRL_EXT);
2443 reg_data |= E1000_CTRL_EXT_PFRSTD;
2444 wr32(E1000_CTRL_EXT, reg_data);
2445 }
2446
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002447 netif_tx_start_all_queues(netdev);
2448
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002449 if (!resuming)
2450 pm_runtime_put(&pdev->dev);
2451
Alexander Duyck25568a52009-10-27 23:49:59 +00002452 /* start the watchdog. */
2453 hw->mac.get_link_status = 1;
2454 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002455
2456 return 0;
2457
2458err_req_irq:
2459 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002460 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002461 igb_free_all_rx_resources(adapter);
2462err_setup_rx:
2463 igb_free_all_tx_resources(adapter);
2464err_setup_tx:
2465 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002466 if (!resuming)
2467 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002468
2469 return err;
2470}
2471
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002472static int igb_open(struct net_device *netdev)
2473{
2474 return __igb_open(netdev, false);
2475}
2476
Auke Kok9d5c8242008-01-24 02:22:38 -08002477/**
2478 * igb_close - Disables a network interface
2479 * @netdev: network interface device structure
2480 *
2481 * Returns 0, this is not allowed to fail
2482 *
2483 * The close entry point is called when an interface is de-activated
2484 * by the OS. The hardware is still under the driver's control, but
2485 * needs to be disabled. A global MAC reset is issued to stop the
2486 * hardware, and all transmit and receive resources are freed.
2487 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002488static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08002489{
2490 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002491 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002492
2493 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08002494
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002495 if (!suspending)
2496 pm_runtime_get_sync(&pdev->dev);
2497
2498 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002499 igb_free_irq(adapter);
2500
2501 igb_free_all_tx_resources(adapter);
2502 igb_free_all_rx_resources(adapter);
2503
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002504 if (!suspending)
2505 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002506 return 0;
2507}
2508
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002509static int igb_close(struct net_device *netdev)
2510{
2511 return __igb_close(netdev, false);
2512}
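
/*
 * Sketch (illustrative; igb_example_suspend_path is hypothetical and not
 * part of the driver): a PM callback that already holds a runtime PM
 * reference calls __igb_close() with suspending = true so the helper
 * does not take or drop another reference of its own.
 */
#ifdef CONFIG_PM
static int igb_example_suspend_path(struct net_device *netdev)
{
	int err = 0;

	if (netif_running(netdev))
		err = __igb_close(netdev, true);
	return err;
}
#endif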
2513
Auke Kok9d5c8242008-01-24 02:22:38 -08002514/**
2515 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002516 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2517 *
2518 * Return 0 on success, negative on failure
2519 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002520int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002521{
Alexander Duyck59d71982010-04-27 13:09:25 +00002522 struct device *dev = tx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002523 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002524 int size;
2525
Alexander Duyck06034642011-08-26 07:44:22 +00002526 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002527 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2528 if (!tx_ring->tx_buffer_info)
2529 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002530 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002531 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002532
2533 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002534 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002535 tx_ring->size = ALIGN(tx_ring->size, 4096);
2536
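	/*
	 * Prefer allocating the descriptor ring on the ring's NUMA node:
	 * set_dev_node() temporarily repoints the device so that
	 * dma_alloc_coherent() allocates there, then the device node is
	 * restored and we fall back to the default node on failure.
	 */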
	set_dev_node(dev, tx_ring->numa_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev,
						   tx_ring->size,
						   &tx_ring->dma,
						   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

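	/*
	 * Program the prefetch (PTHRESH), host (HTHRESH) and write-back
	 * (WTHRESH) thresholds that pace descriptor fetches and
	 * write-backs for this queue.
	 */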
	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size, desc_len;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, rx_ring->numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev,
						   rx_ring->size,
						   &rx_ring->dma,
						   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

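	/*
	 * The 40-byte RSS seed above is programmed as ten 32-bit RSSRK
	 * words; each word packs four key bytes, least-significant
	 * byte first.
	 */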
	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

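	/*
	 * Program the 128-entry redirection table: entries are packed
	 * four per 32-bit RETA register, and each entry maps a hash
	 * value onto one of the active queues, shifted into the bit
	 * position this MAC family expects.
	 */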
	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in igb_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer, so if we are older
	 * than that we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
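	/*
	 * The ring runs in header-split-always mode: SRRCTL carries both
	 * the header buffer size (IGB_RX_HDR_LEN) and the packet buffer
	 * size, which is half a page capped at 16KB.
	 */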
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
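	/*
	 * The buffer owning the skb was mapped with dma_map_single();
	 * buffers carrying page fragments were mapped with dma_map_page(),
	 * so each kind must be unmapped with the matching helper.
	 */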
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	} else if (tx_buffer->dma) {
		dma_unmap_page(ring->dev,
			       tx_buffer->dma,
			       tx_buffer->length,
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	tx_buffer->dma = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
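	/*
	 * Header-split rings map two DMA regions per slot: a small header
	 * buffer (dma) and a half-page data buffer (page_dma); both must
	 * be unmapped and released here.
	 */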
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

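	/* one packed 6-byte (ETH_ALEN) slot per multicast address */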
	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* clear the remaining RAR entries in reverse order to avoid
	 * write combining */
	for (; rar_entries > 0; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

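/*
 * WVBR latches MAC anti-spoof events with one bit per VF transmit queue;
 * the second queue of each VF's pair reports eight bit positions above
 * the first, hence the staggered offset used below.
 */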
#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * set until e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
			       "Duplex, Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full" : "Half",
			       (ctrl & E1000_CTRL_TFCE) &&
			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_LINK_THROTTLE)) {
				netdev_info(netdev, "The network adapter link "
					    "speed was downshifted because it "
					    "overheated\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was "
					   "stopped because it overheated\n");
			}

			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
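/*
 * These ranges feed igb_set_itr(), which maps them onto concrete
 * interrupt rates (roughly 70K, 20K and 4K ints/sec respectively);
 * latency_invalid serves as a sentinel for an uninitialized state.
 */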

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;
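	/*
	 * Rough examples from the math above: 64-byte frames give an
	 * avg_wire_size of 88 and an ITR value of 44 (interrupt often);
	 * 1500-byte frames give 1524 and 762 (throttle hard); mid-size
	 * 600-byte frames land in the boosted bucket at 624 / 3 = 208.
	 */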

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

3793/**
3794 * igb_update_itr - update the dynamic ITR value based on statistics
3795 * Stores a new ITR value based on packets and byte
3796 * counts during the last interrupt. The advantage of per interrupt
3797 * computation is faster updates and more accurate ITR for the current
3798 * traffic pattern. Constants in this function were computed
3799 * based on theoretical maximum wire speed and thresholds were set based
3800 * on testing data as well as attempting to minimize response time
3801 * while increasing bulk throughput.
3802 * This functionality is controlled by the InterruptThrottleRate module
3803 * parameter (see igb_param.c)
3804 * NOTE: These calculations are only valid when operating in a single-
3805 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003806 * @q_vector: pointer to q_vector
3807 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003808 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003809static void igb_update_itr(struct igb_q_vector *q_vector,
3810 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003811{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003812 unsigned int packets = ring_container->total_packets;
3813 unsigned int bytes = ring_container->total_bytes;
3814 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003815
Alexander Duyck0ba82992011-08-26 07:45:47 +00003816 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003817 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003818 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003819
Alexander Duyck0ba82992011-08-26 07:45:47 +00003820 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003821 case lowest_latency:
3822 /* handle TSO and jumbo frames */
3823 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003824 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003825 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003826 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003827 break;
3828 case low_latency: /* 50 usec aka 20000 ints/s */
3829 if (bytes > 10000) {
3830 /* this if handles the TSO accounting */
3831 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003832 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003833 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003834 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003835 } else if (packets > 35) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003836 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003837 }
3838 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003839 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003840 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003841 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003842 }
3843 break;
3844 case bulk_latency: /* 250 usec aka 4000 ints/s */
3845 if (bytes > 25000) {
3846 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003847 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003848 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003849 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003850 }
3851 break;
3852 }
3853
Alexander Duyck0ba82992011-08-26 07:45:47 +00003854 /* clear work counters since we have the values we need */
3855 ring_container->total_bytes = 0;
3856 ring_container->total_packets = 0;
3857
3858 /* write updated itr to ring container */
3859 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003860}
3861
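/* Worked example of the state machine above: starting from low_latency,
 * 8 packets totalling 12,000 bytes give bytes/packets = 1500; bytes >
 * 10000 and 1500 > 1200, so the container steps down to bulk_latency.
 * Had the same interval instead seen 40 packets totalling 12,000 bytes
 * (300 bytes/packet, packets > 35), it would have stepped up to
 * lowest_latency.
 */
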
Alexander Duyck0ba82992011-08-26 07:45:47 +00003862static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003863{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003864 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00003865 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003866 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003867
3868 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3869 if (adapter->link_speed != SPEED_1000) {
3870 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003871 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08003872 goto set_itr_now;
3873 }
3874
Alexander Duyck0ba82992011-08-26 07:45:47 +00003875 igb_update_itr(q_vector, &q_vector->tx);
3876 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08003877
Alexander Duyck0ba82992011-08-26 07:45:47 +00003878 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003879
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003880 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00003881 if (current_itr == lowest_latency &&
3882 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3883 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003884 current_itr = low_latency;
3885
Auke Kok9d5c8242008-01-24 02:22:38 -08003886 switch (current_itr) {
3887 /* counts and packets in update_itr are dependent on these numbers */
3888 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003889 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003890 break;
3891 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003892 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003893 break;
3894 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003895 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003896 break;
3897 default:
3898 break;
3899 }
3900
3901set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00003902 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003903 /* this attempts to bias the interrupt rate towards Bulk
3904 * by adding intermediate steps when interrupt rate is
3905 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003906 new_itr = new_itr > q_vector->itr_val ?
3907 max((new_itr * q_vector->itr_val) /
3908 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00003909 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003910 new_itr;
3911 /* Don't write the value here; it resets the adapter's
3912 * internal timer, and causes us to delay far longer than
3913 * we should between interrupts. Instead, we write the ITR
3914 * value at the beginning of the next interrupt so the timing
3915 * ends up being correct.
3916 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003917 q_vector->itr_val = new_itr;
3918 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003919 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003920}
3921
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00003922static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
3923 u32 type_tucmd, u32 mss_l4len_idx)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003924{
3925 struct e1000_adv_tx_context_desc *context_desc;
3926 u16 i = tx_ring->next_to_use;
3927
3928 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
3929
3930 i++;
3931 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3932
3933 /* set bits to identify this as an advanced context descriptor */
3934 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
3935
3936 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00003937 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003938 mss_l4len_idx |= tx_ring->reg_idx << 4;
3939
3940 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3941 context_desc->seqnum_seed = 0;
3942 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3943 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3944}
3945
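/* Sketch of how callers pack vlan_macip_lens (field positions assumed
 * from the e1000 advanced descriptor definitions: IPLEN in bits 8:0,
 * MACLEN at E1000_ADVTXD_MACLEN_SHIFT = 9, the 802.1q tag in the top
 * 16 bits): an untagged frame with a 14 byte L2 header and a 20 byte
 * IPv4 header yields 20 | (14 << 9) = 0x1c14.
 */
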
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003946static int igb_tso(struct igb_ring *tx_ring,
3947 struct igb_tx_buffer *first,
3948 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08003949{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003950 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003951 u32 vlan_macip_lens, type_tucmd;
3952 u32 mss_l4len_idx, l4len;
3953
3954 if (!skb_is_gso(skb))
3955 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003956
3957 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003958 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08003959 if (err)
3960 return err;
3961 }
3962
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003963 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3964 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08003965
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003966 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003967 struct iphdr *iph = ip_hdr(skb);
3968 iph->tot_len = 0;
3969 iph->check = 0;
3970 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3971 iph->daddr, 0,
3972 IPPROTO_TCP,
3973 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003974 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003975 first->tx_flags |= IGB_TX_FLAGS_TSO |
3976 IGB_TX_FLAGS_CSUM |
3977 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003978 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003979 ipv6_hdr(skb)->payload_len = 0;
3980 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3981 &ipv6_hdr(skb)->daddr,
3982 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003983 first->tx_flags |= IGB_TX_FLAGS_TSO |
3984 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08003985 }
3986
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003987 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003988 l4len = tcp_hdrlen(skb);
3989 *hdr_len = skb_transport_offset(skb) + l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08003990
Alexander Duyck7af40ad92011-08-26 07:45:15 +00003991 /* update gso size and bytecount with header size */
3992 first->gso_segs = skb_shinfo(skb)->gso_segs;
3993 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3994
Auke Kok9d5c8242008-01-24 02:22:38 -08003995 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003996 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
3997 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08003998
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003999 /* VLAN MACLEN IPLEN */
4000 vlan_macip_lens = skb_network_header_len(skb);
4001 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004002 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004003
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004004 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004005
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004006 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004007}
4008
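/* Worked example: a TCP skb with 14 + 20 + 20 = 54 bytes of headers, an
 * MSS (gso_size) of 1448 and 4 segments has skb->len = 54 + 4 * 1448 =
 * 5846.  hdr_len comes out as skb_transport_offset (34) plus tcp_hdrlen
 * (20) = 54, so first->bytecount is adjusted from 5846 to
 * 5846 + 3 * 54 = 6008 = 4 * 1502, the bytes that actually reach the
 * wire once hardware replicates the headers per segment.
 */
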
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004009static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004010{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004011 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004012 u32 vlan_macip_lens = 0;
4013 u32 mss_l4len_idx = 0;
4014 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004015
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004016 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004017 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4018 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004019 } else {
4020 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004021 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004022 case __constant_htons(ETH_P_IP):
4023 vlan_macip_lens |= skb_network_header_len(skb);
4024 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4025 l4_hdr = ip_hdr(skb)->protocol;
4026 break;
4027 case __constant_htons(ETH_P_IPV6):
4028 vlan_macip_lens |= skb_network_header_len(skb);
4029 l4_hdr = ipv6_hdr(skb)->nexthdr;
4030 break;
4031 default:
4032 if (unlikely(net_ratelimit())) {
4033 dev_warn(tx_ring->dev,
4034 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004035 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004036 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004037 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004038 }
4039
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004040 switch (l4_hdr) {
4041 case IPPROTO_TCP:
4042 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4043 mss_l4len_idx = tcp_hdrlen(skb) <<
4044 E1000_ADVTXD_L4LEN_SHIFT;
4045 break;
4046 case IPPROTO_SCTP:
4047 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4048 mss_l4len_idx = sizeof(struct sctphdr) <<
4049 E1000_ADVTXD_L4LEN_SHIFT;
4050 break;
4051 case IPPROTO_UDP:
4052 mss_l4len_idx = sizeof(struct udphdr) <<
4053 E1000_ADVTXD_L4LEN_SHIFT;
4054 break;
4055 default:
4056 if (unlikely(net_ratelimit())) {
4057 dev_warn(tx_ring->dev,
4058 "partial checksum but l4 proto=%x!\n",
4059 l4_hdr);
4060 }
4061 break;
4062 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004063
4064 /* update TX checksum flag */
4065 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004066 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004067
4068 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004069 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004070
4071 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004072}
4073
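/* For instance, an IPv4/SCTP packet with CHECKSUM_PARTIAL set ends up
 * with type_tucmd = E1000_ADVTXD_TUCMD_IPV4 |
 * E1000_ADVTXD_TUCMD_L4T_SCTP and mss_l4len_idx carrying
 * sizeof(struct sctphdr) = 12 in the L4LEN field, so the hardware
 * inserts the SCTP CRC rather than a TCP/UDP checksum.
 */
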
Alexander Duycke032afc2011-08-26 07:44:48 +00004074static __le32 igb_tx_cmd_type(u32 tx_flags)
4075{
4076 /* set type for advanced descriptor with frame checksum insertion */
4077 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4078 E1000_ADVTXD_DCMD_IFCS |
4079 E1000_ADVTXD_DCMD_DEXT);
4080
4081 /* set HW vlan bit if vlan is present */
4082 if (tx_flags & IGB_TX_FLAGS_VLAN)
4083 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4084
4085 /* set timestamp bit if present */
4086 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4087 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4088
4089 /* set segmentation bits for TSO */
4090 if (tx_flags & IGB_TX_FLAGS_TSO)
4091 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4092
4093 return cmd_type;
4094}
4095
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004096static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4097 union e1000_adv_tx_desc *tx_desc,
4098 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004099{
4100 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4101
4102 /* 82575 requires a unique index per ring if any offload is enabled */
4103 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
Alexander Duyck866cff02011-08-26 07:45:36 +00004104 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004105 olinfo_status |= tx_ring->reg_idx << 4;
4106
4107 /* insert L4 checksum */
4108 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4109 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4110
4111 /* insert IPv4 checksum */
4112 if (tx_flags & IGB_TX_FLAGS_IPV4)
4113 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4114 }
4115
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004116 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004117}
4118
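/* e.g. an IPv4/TCP packet with 1460 bytes of payload and both checksum
 * offloads requested ends up with olinfo_status =
 * (1460 << E1000_ADVTXD_PAYLEN_SHIFT) |
 * (E1000_TXD_POPTS_TXSM << 8) | (E1000_TXD_POPTS_IXSM << 8), plus the
 * ring's reg_idx in bits 7:4 on parts that need a per-ring context
 * index.
 */
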
Alexander Duyckebe42d12011-08-26 07:45:09 +00004119/*
4120 * The largest size we can write to the descriptor is 65535. In order to
4121 * maintain a power of two alignment we have to limit ourselves to 32K.
4122 */
4123#define IGB_MAX_TXD_PWR 15
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004124#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
Auke Kok9d5c8242008-01-24 02:22:38 -08004125
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004126static void igb_tx_map(struct igb_ring *tx_ring,
4127 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004128 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004129{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004130 struct sk_buff *skb = first->skb;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004131 struct igb_tx_buffer *tx_buffer_info;
4132 union e1000_adv_tx_desc *tx_desc;
4133 dma_addr_t dma;
4134 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4135 unsigned int data_len = skb->data_len;
4136 unsigned int size = skb_headlen(skb);
4137 unsigned int paylen = skb->len - hdr_len;
4138 __le32 cmd_type;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004139 u32 tx_flags = first->tx_flags;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004140 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004141
4142 tx_desc = IGB_TX_DESC(tx_ring, i);
4143
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004144 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004145 cmd_type = igb_tx_cmd_type(tx_flags);
4146
4147 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4148 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004149 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004150
Alexander Duyckebe42d12011-08-26 07:45:09 +00004151 /* record length, and DMA address */
4152 first->length = size;
4153 first->dma = dma;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004154 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004155
Alexander Duyckebe42d12011-08-26 07:45:09 +00004156 for (;;) {
4157 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4158 tx_desc->read.cmd_type_len =
4159 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004160
Alexander Duyckebe42d12011-08-26 07:45:09 +00004161 i++;
4162 tx_desc++;
4163 if (i == tx_ring->count) {
4164 tx_desc = IGB_TX_DESC(tx_ring, 0);
4165 i = 0;
4166 }
4167
4168 dma += IGB_MAX_DATA_PER_TXD;
4169 size -= IGB_MAX_DATA_PER_TXD;
4170
4171 tx_desc->read.olinfo_status = 0;
4172 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4173 }
4174
4175 if (likely(!data_len))
4176 break;
4177
4178 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4179
Alexander Duyck65689fe2009-03-20 00:17:43 +00004180 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004181 tx_desc++;
4182 if (i == tx_ring->count) {
4183 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004184 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004185 }
Alexander Duyck65689fe2009-03-20 00:17:43 +00004186
Eric Dumazet9e903e02011-10-18 21:00:24 +00004187 size = skb_frag_size(frag);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004188 data_len -= size;
4189
4190 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4191 size, DMA_TO_DEVICE);
4192 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004193 goto dma_error;
4194
Alexander Duyckebe42d12011-08-26 07:45:09 +00004195 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4196 tx_buffer_info->length = size;
4197 tx_buffer_info->dma = dma;
4198
4199 tx_desc->read.olinfo_status = 0;
4200 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4201
4202 frag++;
Auke Kok9d5c8242008-01-24 02:22:38 -08004203 }
4204
Eric Dumazetbdbc0632012-01-04 20:23:36 +00004205 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4206
Alexander Duyckebe42d12011-08-26 07:45:09 +00004207 /* write last descriptor with RS and EOP bits */
4208 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
Ben Greear6b8f0922012-03-06 09:41:53 +00004209 if (unlikely(skb->no_fcs))
4210 cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
Alexander Duyckebe42d12011-08-26 07:45:09 +00004211 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyck8542db02011-08-26 07:44:43 +00004212
4213 /* set the timestamp */
4214 first->time_stamp = jiffies;
4215
Alexander Duyckebe42d12011-08-26 07:45:09 +00004216 /*
4217 * Force memory writes to complete before letting h/w know there
4218 * are new descriptors to fetch. (Only applicable for weak-ordered
4219 * memory model archs, such as IA-64).
4220 *
4221 * We also need this memory barrier to make certain all of the
4222 * status bits have been updated before next_to_watch is written.
4223 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004224 wmb();
4225
Alexander Duyckebe42d12011-08-26 07:45:09 +00004226 /* set next_to_watch value indicating a packet is present */
4227 first->next_to_watch = tx_desc;
4228
4229 i++;
4230 if (i == tx_ring->count)
4231 i = 0;
4232
Auke Kok9d5c8242008-01-24 02:22:38 -08004233 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004234
Alexander Duyckfce99e32009-10-27 15:51:27 +00004235 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004236
Auke Kok9d5c8242008-01-24 02:22:38 -08004237 /* we need this if more than one processor can write to our tail
4238	 * at a time; it synchronizes IO on IA64/Altix systems */
4239 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004240
4241 return;
4242
4243dma_error:
4244 dev_err(tx_ring->dev, "TX DMA map failed\n");
4245
4246 /* clear dma mappings for failed tx_buffer_info map */
4247 for (;;) {
4248 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4249 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4250 if (tx_buffer_info == first)
4251 break;
4252 if (i == 0)
4253 i = tx_ring->count;
4254 i--;
4255 }
4256
4257 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004258}
4259
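/* Descriptor accounting sketch: an skb with 1,514 bytes of linear data
 * and two 32,000 byte frags consumes three data descriptors, since none
 * of the buffers exceeds IGB_MAX_DATA_PER_TXD (32,768); a single
 * 45,000 byte frag would instead be split by the inner while loop into
 * 32,768 + 12,232 across two descriptors.
 */
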
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004260static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004261{
Alexander Duycke694e962009-10-27 15:53:06 +00004262 struct net_device *netdev = tx_ring->netdev;
4263
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004264 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004265
Auke Kok9d5c8242008-01-24 02:22:38 -08004266 /* Herbert's original patch had:
4267 * smp_mb__after_netif_stop_queue();
4268 * but since that doesn't exist yet, just open code it. */
4269 smp_mb();
4270
4271 /* We need to check again in case another CPU has just
4272 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004273 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004274 return -EBUSY;
4275
4276 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004277 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004278
4279 u64_stats_update_begin(&tx_ring->tx_syncp2);
4280 tx_ring->tx_stats.restart_queue2++;
4281 u64_stats_update_end(&tx_ring->tx_syncp2);
4282
Auke Kok9d5c8242008-01-24 02:22:38 -08004283 return 0;
4284}
4285
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004286static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004287{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004288 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004289 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004290 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004291}
4292
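/* Illustration, assuming igb_desc_unused() reports count - 1 minus the
 * descriptors still in flight, as elsewhere in this driver: with a
 * 256 entry ring, next_to_use = 250 and next_to_clean = 10, there are
 * 256 + 10 - 250 - 1 = 15 free slots, so a request to reserve
 * MAX_SKB_FRAGS + 4 = 21 (with 4 KB pages) stops the subqueue until
 * the cleanup path frees descriptors.
 */
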
Alexander Duyckcd392f52011-08-26 07:43:59 +00004293netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4294 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004295{
Alexander Duyck8542db02011-08-26 07:44:43 +00004296 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004297 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004298 u32 tx_flags = 0;
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004299 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004300 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004301
Auke Kok9d5c8242008-01-24 02:22:38 -08004302 /* need: 1 descriptor per page,
4303 * + 2 desc gap to keep tail from touching head,
4304 * + 1 desc for skb->data,
4305 * + 1 desc for context descriptor,
4306 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004307 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004308 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004309 return NETDEV_TX_BUSY;
4310 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004311
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004312 /* record the location of the first descriptor for this packet */
4313 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4314 first->skb = skb;
4315 first->bytecount = skb->len;
4316 first->gso_segs = 1;
4317
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004318 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4319 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004320 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004321 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004322
Jesse Grosseab6d182010-10-20 13:56:03 +00004323 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004324 tx_flags |= IGB_TX_FLAGS_VLAN;
4325 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4326 }
4327
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004328 /* record initial flags and protocol */
4329 first->tx_flags = tx_flags;
4330 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004331
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004332 tso = igb_tso(tx_ring, first, &hdr_len);
4333 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004334 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004335 else if (!tso)
4336 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004337
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004338 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004339
4340 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004341 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004342
Auke Kok9d5c8242008-01-24 02:22:38 -08004343 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004344
4345out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004346 igb_unmap_and_free_tx_resource(tx_ring, first);
4347
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004348 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004349}
4350
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004351static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4352 struct sk_buff *skb)
4353{
4354 unsigned int r_idx = skb->queue_mapping;
4355
4356 if (r_idx >= adapter->num_tx_queues)
4357 r_idx = r_idx % adapter->num_tx_queues;
4358
4359 return adapter->tx_ring[r_idx];
4360}
4361
Alexander Duyckcd392f52011-08-26 07:43:59 +00004362static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4363 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004364{
4365 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004366
4367 if (test_bit(__IGB_DOWN, &adapter->state)) {
4368 dev_kfree_skb_any(skb);
4369 return NETDEV_TX_OK;
4370 }
4371
4372 if (skb->len <= 0) {
4373 dev_kfree_skb_any(skb);
4374 return NETDEV_TX_OK;
4375 }
4376
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004377 /*
4378 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4379 * in order to meet this minimum size requirement.
4380 */
4381 if (skb->len < 17) {
4382 if (skb_padto(skb, 17))
4383 return NETDEV_TX_OK;
4384 skb->len = 17;
4385 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004386
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004387 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004388}
4389
4390/**
4391 * igb_tx_timeout - Respond to a Tx Hang
4392 * @netdev: network interface device structure
4393 **/
4394static void igb_tx_timeout(struct net_device *netdev)
4395{
4396 struct igb_adapter *adapter = netdev_priv(netdev);
4397 struct e1000_hw *hw = &adapter->hw;
4398
4399 /* Do the reset outside of interrupt context */
4400 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004401
Alexander Duyck06218a82011-08-26 07:46:55 +00004402 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00004403 hw->dev_spec._82575.global_device_reset = true;
4404
Auke Kok9d5c8242008-01-24 02:22:38 -08004405 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004406 wr32(E1000_EICS,
4407 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004408}
4409
4410static void igb_reset_task(struct work_struct *work)
4411{
4412 struct igb_adapter *adapter;
4413 adapter = container_of(work, struct igb_adapter, reset_task);
4414
Taku Izumic97ec422010-04-27 14:39:30 +00004415 igb_dump(adapter);
4416 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004417 igb_reinit_locked(adapter);
4418}
4419
4420/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004421 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004422 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004423 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004424 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004425 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004426static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4427 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004428{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004429 struct igb_adapter *adapter = netdev_priv(netdev);
4430
4431 spin_lock(&adapter->stats64_lock);
4432 igb_update_stats(adapter, &adapter->stats64);
4433 memcpy(stats, &adapter->stats64, sizeof(*stats));
4434 spin_unlock(&adapter->stats64_lock);
4435
4436 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004437}
4438
4439/**
4440 * igb_change_mtu - Change the Maximum Transfer Unit
4441 * @netdev: network interface device structure
4442 * @new_mtu: new value for maximum frame size
4443 *
4444 * Returns 0 on success, negative on failure
4445 **/
4446static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4447{
4448 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004449 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004450 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004451
Alexander Duyckc809d222009-10-27 23:52:13 +00004452 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004453 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004454 return -EINVAL;
4455 }
4456
Alexander Duyck153285f2011-08-26 07:43:32 +00004457#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004458 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004459 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004460 return -EINVAL;
4461 }
4462
4463 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4464 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004465
Auke Kok9d5c8242008-01-24 02:22:38 -08004466 /* igb_down has a dependency on max_frame_size */
4467 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004468
Alexander Duyck4c844852009-10-27 15:52:07 +00004469 if (netif_running(netdev))
4470 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004471
Alexander Duyck090b1792009-10-27 23:51:55 +00004472 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004473 netdev->mtu, new_mtu);
4474 netdev->mtu = new_mtu;
4475
4476 if (netif_running(netdev))
4477 igb_up(adapter);
4478 else
4479 igb_reset(adapter);
4480
4481 clear_bit(__IGB_RESETTING, &adapter->state);
4482
4483 return 0;
4484}
4485
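/* Framing arithmetic: max_frame = new_mtu + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4), so the 9238 byte
 * MAX_STD_JUMBO_FRAME_SIZE cap corresponds to an MTU of
 * 9238 - 22 = 9216, which is the limit the error message above reports.
 */
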
4486/**
4487 * igb_update_stats - Update the board statistics counters
4488 * @adapter: board private structure
4489 **/
4490
Eric Dumazet12dcd862010-10-15 17:27:10 +00004491void igb_update_stats(struct igb_adapter *adapter,
4492 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004493{
4494 struct e1000_hw *hw = &adapter->hw;
4495 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004496 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004497 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004498 int i;
4499 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004500 unsigned int start;
4501 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004502
4503#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4504
4505 /*
4506 * Prevent stats update while adapter is being reset, or if the pci
4507 * connection is down.
4508 */
4509 if (adapter->link_speed == 0)
4510 return;
4511 if (pci_channel_offline(pdev))
4512 return;
4513
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004514 bytes = 0;
4515 packets = 0;
4516 for (i = 0; i < adapter->num_rx_queues; i++) {
4517 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00004518 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004519
Alexander Duyck3025a442010-02-17 01:02:39 +00004520 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004521 net_stats->rx_fifo_errors += rqdpc_tmp;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004522
4523 do {
4524 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4525 _bytes = ring->rx_stats.bytes;
4526 _packets = ring->rx_stats.packets;
4527 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4528 bytes += _bytes;
4529 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004530 }
4531
Alexander Duyck128e45e2009-11-12 18:37:38 +00004532 net_stats->rx_bytes = bytes;
4533 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004534
4535 bytes = 0;
4536 packets = 0;
4537 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004538 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004539 do {
4540 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4541 _bytes = ring->tx_stats.bytes;
4542 _packets = ring->tx_stats.packets;
4543 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4544 bytes += _bytes;
4545 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004546 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004547 net_stats->tx_bytes = bytes;
4548 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004549
4550 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004551 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4552 adapter->stats.gprc += rd32(E1000_GPRC);
4553 adapter->stats.gorc += rd32(E1000_GORCL);
4554 rd32(E1000_GORCH); /* clear GORCL */
4555 adapter->stats.bprc += rd32(E1000_BPRC);
4556 adapter->stats.mprc += rd32(E1000_MPRC);
4557 adapter->stats.roc += rd32(E1000_ROC);
4558
4559 adapter->stats.prc64 += rd32(E1000_PRC64);
4560 adapter->stats.prc127 += rd32(E1000_PRC127);
4561 adapter->stats.prc255 += rd32(E1000_PRC255);
4562 adapter->stats.prc511 += rd32(E1000_PRC511);
4563 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4564 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4565 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4566 adapter->stats.sec += rd32(E1000_SEC);
4567
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004568 mpc = rd32(E1000_MPC);
4569 adapter->stats.mpc += mpc;
4570 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004571 adapter->stats.scc += rd32(E1000_SCC);
4572 adapter->stats.ecol += rd32(E1000_ECOL);
4573 adapter->stats.mcc += rd32(E1000_MCC);
4574 adapter->stats.latecol += rd32(E1000_LATECOL);
4575 adapter->stats.dc += rd32(E1000_DC);
4576 adapter->stats.rlec += rd32(E1000_RLEC);
4577 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4578 adapter->stats.xontxc += rd32(E1000_XONTXC);
4579 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4580 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4581 adapter->stats.fcruc += rd32(E1000_FCRUC);
4582 adapter->stats.gptc += rd32(E1000_GPTC);
4583 adapter->stats.gotc += rd32(E1000_GOTCL);
4584 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004585 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004586 adapter->stats.ruc += rd32(E1000_RUC);
4587 adapter->stats.rfc += rd32(E1000_RFC);
4588 adapter->stats.rjc += rd32(E1000_RJC);
4589 adapter->stats.tor += rd32(E1000_TORH);
4590 adapter->stats.tot += rd32(E1000_TOTH);
4591 adapter->stats.tpr += rd32(E1000_TPR);
4592
4593 adapter->stats.ptc64 += rd32(E1000_PTC64);
4594 adapter->stats.ptc127 += rd32(E1000_PTC127);
4595 adapter->stats.ptc255 += rd32(E1000_PTC255);
4596 adapter->stats.ptc511 += rd32(E1000_PTC511);
4597 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4598 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4599
4600 adapter->stats.mptc += rd32(E1000_MPTC);
4601 adapter->stats.bptc += rd32(E1000_BPTC);
4602
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004603 adapter->stats.tpt += rd32(E1000_TPT);
4604 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004605
4606 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004607 /* read internal phy specific stats */
4608 reg = rd32(E1000_CTRL_EXT);
4609 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4610 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4611 adapter->stats.tncrs += rd32(E1000_TNCRS);
4612 }
4613
Auke Kok9d5c8242008-01-24 02:22:38 -08004614 adapter->stats.tsctc += rd32(E1000_TSCTC);
4615 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4616
4617 adapter->stats.iac += rd32(E1000_IAC);
4618 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4619 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4620 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4621 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4622 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4623 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4624 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4625 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4626
4627 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004628 net_stats->multicast = adapter->stats.mprc;
4629 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004630
4631 /* Rx Errors */
4632
4633 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004634 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004635 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004636 adapter->stats.crcerrs + adapter->stats.algnerrc +
4637 adapter->stats.ruc + adapter->stats.roc +
4638 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004639 net_stats->rx_length_errors = adapter->stats.ruc +
4640 adapter->stats.roc;
4641 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4642 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4643 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004644
4645 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004646 net_stats->tx_errors = adapter->stats.ecol +
4647 adapter->stats.latecol;
4648 net_stats->tx_aborted_errors = adapter->stats.ecol;
4649 net_stats->tx_window_errors = adapter->stats.latecol;
4650 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004651
4652 /* Tx Dropped needs to be maintained elsewhere */
4653
4654 /* Phy Stats */
4655 if (hw->phy.media_type == e1000_media_type_copper) {
4656 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004657 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004658 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4659 adapter->phy_stats.idle_errors += phy_tmp;
4660 }
4661 }
4662
4663 /* Management Stats */
4664 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4665 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4666 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004667
4668 /* OS2BMC Stats */
4669 reg = rd32(E1000_MANC);
4670 if (reg & E1000_MANC_EN_BMC2OS) {
4671 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4672 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4673 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4674 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4675 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004676}
4677
Auke Kok9d5c8242008-01-24 02:22:38 -08004678static irqreturn_t igb_msix_other(int irq, void *data)
4679{
Alexander Duyck047e0032009-10-27 15:49:27 +00004680 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004681 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004682 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004683 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004684
Alexander Duyck7f081d42010-01-07 17:41:00 +00004685 if (icr & E1000_ICR_DRSTA)
4686 schedule_work(&adapter->reset_task);
4687
Alexander Duyck047e0032009-10-27 15:49:27 +00004688 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004689 /* HW is reporting DMA is out of sync */
4690 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004691 /* The DMA Out of Sync is also an indication of a spoof event
4692 * in IOV mode. Check the Wrong VM Behavior register to
4693 * see if it is really a spoof event. */
4694 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004695 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004696
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004697 /* Check for a mailbox event */
4698 if (icr & E1000_ICR_VMMB)
4699 igb_msg_task(adapter);
4700
4701 if (icr & E1000_ICR_LSC) {
4702 hw->mac.get_link_status = 1;
4703 /* guard against interrupt when we're going down */
4704 if (!test_bit(__IGB_DOWN, &adapter->state))
4705 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4706 }
4707
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004708 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004709
4710 return IRQ_HANDLED;
4711}
4712
Alexander Duyck047e0032009-10-27 15:49:27 +00004713static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004714{
Alexander Duyck26b39272010-02-17 01:00:41 +00004715 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004716 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004717
Alexander Duyck047e0032009-10-27 15:49:27 +00004718 if (!q_vector->set_itr)
4719 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004720
Alexander Duyck047e0032009-10-27 15:49:27 +00004721 if (!itr_val)
4722 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004723
Alexander Duyck26b39272010-02-17 01:00:41 +00004724 if (adapter->hw.mac.type == e1000_82575)
4725 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004726 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004727 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004728
4729 writel(itr_val, q_vector->itr_register);
4730 q_vector->set_itr = 0;
4731}
4732
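/* e.g. itr_val = 196 is already aligned to the 0x7FFC mask (bits 14:2);
 * on 82575 the interval is duplicated into both halves of the register
 * (0x00c400c4), while later parts instead set E1000_EITR_CNT_IGNR so,
 * presumably, the new interval takes effect without waiting out the
 * running countdown.
 */
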
4733static irqreturn_t igb_msix_ring(int irq, void *data)
4734{
4735 struct igb_q_vector *q_vector = data;
4736
4737 /* Write the ITR value calculated from the previous interrupt. */
4738 igb_write_itr(q_vector);
4739
4740 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004741
Auke Kok9d5c8242008-01-24 02:22:38 -08004742 return IRQ_HANDLED;
4743}
4744
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004745#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004746static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004747{
Alexander Duyck047e0032009-10-27 15:49:27 +00004748 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004749 struct e1000_hw *hw = &adapter->hw;
4750 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004751
Alexander Duyck047e0032009-10-27 15:49:27 +00004752 if (q_vector->cpu == cpu)
4753 goto out_no_update;
4754
Alexander Duyck0ba82992011-08-26 07:45:47 +00004755 if (q_vector->tx.ring) {
4756 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004757 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4758 if (hw->mac.type == e1000_82575) {
4759 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4760 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4761 } else {
4762 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4763 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4764 E1000_DCA_TXCTRL_CPUID_SHIFT;
4765 }
4766 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4767 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4768 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004769 if (q_vector->rx.ring) {
4770 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004771 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4772 if (hw->mac.type == e1000_82575) {
4773 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4774 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4775 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004776 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004777 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004778 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004779 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004780 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4781 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4782 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4783 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004784 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004785 q_vector->cpu = cpu;
4786out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004787 put_cpu();
4788}
4789
4790static void igb_setup_dca(struct igb_adapter *adapter)
4791{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004792 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004793 int i;
4794
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004795 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004796 return;
4797
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004798 /* Always use CB2 mode, difference is masked in the CB driver. */
4799 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4800
Alexander Duyck047e0032009-10-27 15:49:27 +00004801 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004802 adapter->q_vector[i]->cpu = -1;
4803 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004804 }
4805}
4806
4807static int __igb_notify_dca(struct device *dev, void *data)
4808{
4809 struct net_device *netdev = dev_get_drvdata(dev);
4810 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004811 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004812 struct e1000_hw *hw = &adapter->hw;
4813 unsigned long event = *(unsigned long *)data;
4814
4815 switch (event) {
4816 case DCA_PROVIDER_ADD:
4817 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004818 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004819 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004820 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004821 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004822 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004823 igb_setup_dca(adapter);
4824 break;
4825 }
4826 /* Fall Through since DCA is disabled. */
4827 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004828 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004829 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004830 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004831 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004832 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004833 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004834 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004835 }
4836 break;
4837 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004838
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004839 return 0;
4840}
4841
4842static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4843 void *p)
4844{
4845 int ret_val;
4846
4847 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4848 __igb_notify_dca);
4849
4850 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4851}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004852#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004853
Greg Rose0224d662011-10-14 02:57:14 +00004854#ifdef CONFIG_PCI_IOV
4855static int igb_vf_configure(struct igb_adapter *adapter, int vf)
4856{
4857 unsigned char mac_addr[ETH_ALEN];
4858 struct pci_dev *pdev = adapter->pdev;
4859 struct e1000_hw *hw = &adapter->hw;
4860 struct pci_dev *pvfdev;
4861 unsigned int device_id;
4862 u16 thisvf_devfn;
4863
4864 random_ether_addr(mac_addr);
4865 igb_set_vf_mac(adapter, vf, mac_addr);
4866
4867 switch (adapter->hw.mac.type) {
4868 case e1000_82576:
4869 device_id = IGB_82576_VF_DEV_ID;
4870 /* VF Stride for 82576 is 2 */
4871 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
4872 (pdev->devfn & 1);
4873 break;
4874 case e1000_i350:
4875 device_id = IGB_I350_VF_DEV_ID;
4876 /* VF Stride for I350 is 4 */
4877 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
4878 (pdev->devfn & 3);
4879 break;
4880 default:
4881 device_id = 0;
4882 thisvf_devfn = 0;
4883 break;
4884 }
4885
4886 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
4887 while (pvfdev) {
4888 if (pvfdev->devfn == thisvf_devfn)
4889 break;
4890 pvfdev = pci_get_device(hw->vendor_id,
4891 device_id, pvfdev);
4892 }
4893
4894 if (pvfdev)
4895 adapter->vf_data[vf].vfdev = pvfdev;
4896 else
4897 dev_err(&pdev->dev,
4898 "Couldn't find pci dev ptr for VF %4.4x\n",
4899 thisvf_devfn);
4900 return pvfdev != NULL;
4901}
4902
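/* Stride example: with the PF at devfn 0x00, an 82576 VF n is expected
 * at devfn 0x80 + 2 * n (VF 3 -> 0x86), while on i350 the stride of 4
 * puts the same VF at 0x80 + 4 * 3 = 0x8c; the pci_get_device() walk
 * above then matches on that devfn.
 */
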
4903static int igb_find_enabled_vfs(struct igb_adapter *adapter)
4904{
4905 struct e1000_hw *hw = &adapter->hw;
4906 struct pci_dev *pdev = adapter->pdev;
4907 struct pci_dev *pvfdev;
4908 u16 vf_devfn = 0;
4909 u16 vf_stride;
4910 unsigned int device_id;
4911 int vfs_found = 0;
4912
4913 switch (adapter->hw.mac.type) {
4914 case e1000_82576:
4915 device_id = IGB_82576_VF_DEV_ID;
4916 /* VF Stride for 82576 is 2 */
4917 vf_stride = 2;
4918 break;
4919 case e1000_i350:
4920 device_id = IGB_I350_VF_DEV_ID;
4921 /* VF Stride for I350 is 4 */
4922 vf_stride = 4;
4923 break;
4924 default:
4925 device_id = 0;
4926 vf_stride = 0;
4927 break;
4928 }
4929
4930 vf_devfn = pdev->devfn + 0x80;
4931 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
4932 while (pvfdev) {
Greg Rose06292922012-02-02 23:51:43 +00004933 if (pvfdev->devfn == vf_devfn &&
4934 (pvfdev->bus->number >= pdev->bus->number))
Greg Rose0224d662011-10-14 02:57:14 +00004935 vfs_found++;
4936 vf_devfn += vf_stride;
4937 pvfdev = pci_get_device(hw->vendor_id,
4938 device_id, pvfdev);
4939 }
4940
4941 return vfs_found;
4942}
4943
4944static int igb_check_vf_assignment(struct igb_adapter *adapter)
4945{
4946 int i;
4947 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4948 if (adapter->vf_data[i].vfdev) {
4949 if (adapter->vf_data[i].vfdev->dev_flags &
4950 PCI_DEV_FLAGS_ASSIGNED)
4951 return true;
4952 }
4953 }
4954 return false;
4955}
4956
4957#endif
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004958static void igb_ping_all_vfs(struct igb_adapter *adapter)
4959{
4960 struct e1000_hw *hw = &adapter->hw;
4961 u32 ping;
4962 int i;
4963
4964 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4965 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004966 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004967 ping |= E1000_VT_MSGTYPE_CTS;
4968 igb_write_mbx(hw, &ping, 1, i);
4969 }
4970}
4971
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004972static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4973{
4974 struct e1000_hw *hw = &adapter->hw;
4975 u32 vmolr = rd32(E1000_VMOLR(vf));
4976 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4977
Alexander Duyckd85b90042010-09-22 17:56:20 +00004978 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004979 IGB_VF_FLAG_MULTI_PROMISC);
4980 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4981
4982 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4983 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00004984 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004985 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4986 } else {
4987 /*
4988 * if we have hashes and we are clearing a multicast promisc
4989 * flag we need to write the hashes to the MTA as this step
4990 * was previously skipped
4991 */
4992 if (vf_data->num_vf_mc_hashes > 30) {
4993 vmolr |= E1000_VMOLR_MPME;
4994 } else if (vf_data->num_vf_mc_hashes) {
4995 int j;
4996 vmolr |= E1000_VMOLR_ROMPE;
4997 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4998 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4999 }
5000 }
5001
5002 wr32(E1000_VMOLR(vf), vmolr);
5003
5004 /* there are flags left unprocessed, likely not supported */
5005 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5006 return -EINVAL;
5007
5008 return 0;
5009
5010}
5011
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005012static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5013 u32 *msgbuf, u32 vf)
5014{
5015 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5016 u16 *hash_list = (u16 *)&msgbuf[1];
5017 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5018 int i;
5019
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005020 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005021 * to this VF for later use to restore when the PF multicast
5022 * list changes
5023 */
5024 vf_data->num_vf_mc_hashes = n;
5025
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005026 /* only up to 30 hash values supported */
5027 if (n > 30)
5028 n = 30;
5029
5030 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005031 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005032 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005033
5034 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005035 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005036
5037 return 0;
5038}
5039
5040static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5041{
5042 struct e1000_hw *hw = &adapter->hw;
5043 struct vf_data_storage *vf_data;
5044 int i, j;
5045
5046 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005047 u32 vmolr = rd32(E1000_VMOLR(i));
5048 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5049
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005050 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005051
5052 if ((vf_data->num_vf_mc_hashes > 30) ||
5053 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5054 vmolr |= E1000_VMOLR_MPME;
5055 } else if (vf_data->num_vf_mc_hashes) {
5056 vmolr |= E1000_VMOLR_ROMPE;
5057 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5058 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5059 }
5060 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005061 }
5062}
5063
5064static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5065{
5066 struct e1000_hw *hw = &adapter->hw;
5067 u32 pool_mask, reg, vid;
5068 int i;
5069
5070 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5071
5072 /* Find the vlan filter for this id */
5073 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5074 reg = rd32(E1000_VLVF(i));
5075
5076 /* remove the vf from the pool */
5077 reg &= ~pool_mask;
5078
5079 /* if pool is empty then remove entry from vfta */
5080 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5081 (reg & E1000_VLVF_VLANID_ENABLE)) {
5082			vid = reg & E1000_VLVF_VLANID_MASK;
5083			reg = 0;
5084 igb_vfta_set(hw, vid, false);
5085 }
5086
5087 wr32(E1000_VLVF(i), reg);
5088 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005089
5090 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005091}
5092
5093static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5094{
5095 struct e1000_hw *hw = &adapter->hw;
5096 u32 reg, i;
5097
Alexander Duyck51466232009-10-27 23:47:35 +00005098 /* The vlvf table only exists on 82576 hardware and newer */
5099 if (hw->mac.type < e1000_82576)
5100 return -1;
5101
5102 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005103 if (!adapter->vfs_allocated_count)
5104 return -1;
5105
5106 /* Find the vlan filter for this id */
5107 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5108 reg = rd32(E1000_VLVF(i));
5109 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5110 vid == (reg & E1000_VLVF_VLANID_MASK))
5111 break;
5112 }
5113
5114 if (add) {
5115 if (i == E1000_VLVF_ARRAY_SIZE) {
5116 /* Did not find a matching VLAN ID entry that was
5117 * enabled. Search for a free filter entry, i.e.
5118 * one without the enable bit set
5119 */
5120 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5121 reg = rd32(E1000_VLVF(i));
5122 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5123 break;
5124 }
5125 }
5126 if (i < E1000_VLVF_ARRAY_SIZE) {
5127 /* Found an enabled/available entry */
5128 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5129
5130 /* if !enabled we need to set this up in vfta */
5131 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005132 /* add VID to filter table */
5133 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005134 reg |= E1000_VLVF_VLANID_ENABLE;
5135 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005136 reg &= ~E1000_VLVF_VLANID_MASK;
5137 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005138 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005139
5140 /* do not modify RLPML for PF devices */
5141 if (vf >= adapter->vfs_allocated_count)
5142 return 0;
5143
5144 if (!adapter->vf_data[vf].vlans_enabled) {
5145 u32 size;
5146 reg = rd32(E1000_VMOLR(vf));
5147 size = reg & E1000_VMOLR_RLPML_MASK;
5148 size += 4;
5149 reg &= ~E1000_VMOLR_RLPML_MASK;
5150 reg |= size;
5151 wr32(E1000_VMOLR(vf), reg);
5152 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005153
Alexander Duyck51466232009-10-27 23:47:35 +00005154 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005155 }
5156 } else {
5157 if (i < E1000_VLVF_ARRAY_SIZE) {
5158 /* remove vf from the pool */
5159 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5160 /* if pool is empty then remove entry from vfta */
5161 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5162 reg = 0;
5163 igb_vfta_set(hw, vid, false);
5164 }
5165 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005166
5167 /* do not modify RLPML for PF devices */
5168 if (vf >= adapter->vfs_allocated_count)
5169 return 0;
5170
5171 adapter->vf_data[vf].vlans_enabled--;
5172 if (!adapter->vf_data[vf].vlans_enabled) {
5173 u32 size;
5174 reg = rd32(E1000_VMOLR(vf));
5175 size = reg & E1000_VMOLR_RLPML_MASK;
5176 size -= 4;
5177 reg &= ~E1000_VMOLR_RLPML_MASK;
5178 reg |= size;
5179 wr32(E1000_VMOLR(vf), reg);
5180 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005181 }
5182 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005183 return 0;
5184}
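/*
 * Editor's note: the RLPML updates above grow or shrink the pool's
 * max receive frame size by 4 bytes (one 802.1Q tag) when the first
 * VLAN is added to a VF and when the last one is removed.  A worked
 * example with illustrative numbers: a pool limited to 1522 bytes is
 * bumped to 1526 so a tagged frame at full MTU still passes.  The
 * helper below is an equivalent of the open-coded update, shown for
 * illustration only.
 */
#if 0
static void igb_vmolr_adjust_rlpml(struct e1000_hw *hw, int vf, int delta)
{
	u32 reg = rd32(E1000_VMOLR(vf));
	u32 size = (reg & E1000_VMOLR_RLPML_MASK) + delta;

	reg &= ~E1000_VMOLR_RLPML_MASK;
	reg |= size & E1000_VMOLR_RLPML_MASK;
	wr32(E1000_VMOLR(vf), reg);
}
#endif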
5185
5186static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5187{
5188 struct e1000_hw *hw = &adapter->hw;
5189
5190 if (vid)
5191 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5192 else
5193 wr32(E1000_VMVIR(vf), 0);
5194}
5195
5196static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5197 int vf, u16 vlan, u8 qos)
5198{
5199 int err = 0;
5200 struct igb_adapter *adapter = netdev_priv(netdev);
5201
5202 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5203 return -EINVAL;
5204 if (vlan || qos) {
5205 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5206 if (err)
5207 goto out;
5208 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5209 igb_set_vmolr(adapter, vf, !vlan);
5210 adapter->vf_data[vf].pf_vlan = vlan;
5211 adapter->vf_data[vf].pf_qos = qos;
5212 dev_info(&adapter->pdev->dev,
5213 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5214 if (test_bit(__IGB_DOWN, &adapter->state)) {
5215 dev_warn(&adapter->pdev->dev,
5216 "The VF VLAN has been set,"
5217 " but the PF device is not up.\n");
5218 dev_warn(&adapter->pdev->dev,
5219 "Bring the PF device up before"
5220 " attempting to use the VF device.\n");
5221 }
5222 } else {
5223 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5224 false, vf);
5225 igb_set_vmvir(adapter, vlan, vf);
5226 igb_set_vmolr(adapter, vf, true);
5227 adapter->vf_data[vf].pf_vlan = 0;
5228 adapter->vf_data[vf].pf_qos = 0;
5229 }
5230out:
5231 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005232}
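/*
 * Editor's note: this ndo is reached from userspace through the
 * rtnetlink IFLA_VF_VLAN attribute, e.g. with iproute2:
 *
 *	ip link set <pf-dev> vf 0 vlan 100 qos 3
 *
 * and passing "vlan 0" clears the administrative tag, taking the
 * else branch above.
 */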
5233
5234static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5235{
5236 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5237 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5238
5239 return igb_vlvf_set(adapter, vid, add, vf);
5240}
5241
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005242static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005243{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005244 /* clear flags - except flag that indicates PF has set the MAC */
5245 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005246 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005247
5248 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005249 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005250
5251 /* reset vlans for device */
5252 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005253 if (adapter->vf_data[vf].pf_vlan)
5254 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5255 adapter->vf_data[vf].pf_vlan,
5256 adapter->vf_data[vf].pf_qos);
5257 else
5258 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005259
5260 /* reset multicast table array for vf */
5261 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5262
5263 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005264 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005265}
5266
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005267static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5268{
5269 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5270
5271 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005272 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5273 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005274
5275 /* process remaining reset events */
5276 igb_vf_reset(adapter, vf);
5277}
5278
5279static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005280{
5281 struct e1000_hw *hw = &adapter->hw;
5282 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005283 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005284 u32 reg, msgbuf[3];
5285 u8 *addr = (u8 *)(&msgbuf[1]);
5286
5287 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005288 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005289
5290 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005291 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005292
5293 /* enable transmit and receive for vf */
5294 reg = rd32(E1000_VFTE);
5295 wr32(E1000_VFTE, reg | (1 << vf));
5296 reg = rd32(E1000_VFRE);
5297 wr32(E1000_VFRE, reg | (1 << vf));
5298
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005299 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005300
5301 /* reply to reset with ack and vf mac address */
5302 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5303 memcpy(addr, vf_mac, 6);
5304 igb_write_mbx(hw, msgbuf, 3, vf);
5305}
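/*
 * Editor's note: a hedged sketch of how the VF end might consume the
 * 3-word reply written above - word 0 carries E1000_VF_RESET plus an
 * ACK/NACK bit, words 1-2 carry the 6-byte MAC.  The function name is
 * hypothetical and not part of this driver.
 */
#if 0
static int igbvf_parse_reset_reply(const u32 *msgbuf, u8 *mac_out)
{
	if (!(msgbuf[0] & E1000_VT_MSGTYPE_ACK))
		return -EINVAL;	/* PF refused the reset */

	memcpy(mac_out, (const u8 *)&msgbuf[1], 6);
	return 0;
}
#endif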
5306
5307static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5308{
Greg Rosede42edd2010-07-01 13:39:23 +00005309 /*
5310 * The VF MAC Address is stored in a packed array of bytes
5311 * starting at the second 32 bit word of the msg array
5312 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005313	unsigned char *addr = (unsigned char *)&msg[1];
5314 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005315
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005316 if (is_valid_ether_addr(addr))
5317 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005318
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005319 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005320}
5321
5322static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5323{
5324 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005325 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005326 u32 msg = E1000_VT_MSGTYPE_NACK;
5327
5328 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005329 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5330 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005331 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005332 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005333 }
5334}
5335
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005336static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005337{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005338 struct pci_dev *pdev = adapter->pdev;
5339 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005340 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005341 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005342 s32 retval;
5343
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005344 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005345
Alexander Duyckfef45f42009-12-11 22:57:34 -08005346 if (retval) {
5347 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005348 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005349 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5350 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5351 return;
5352 goto out;
5353 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005354
5355 /* this is a message we already processed, do nothing */
5356 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005357 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005358
5359 /*
5360 * until the vf completes a reset it should not be
5361 * allowed to start any configuration.
5362 */
5363
5364 if (msgbuf[0] == E1000_VF_RESET) {
5365 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005366 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005367 }
5368
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005369 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005370 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5371 return;
5372 retval = -1;
5373 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005374 }
5375
5376	switch (msgbuf[0] & 0xFFFF) {
5377 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005378 retval = -EINVAL;
5379 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5380 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5381 else
5382 dev_warn(&pdev->dev,
5383 "VF %d attempted to override administratively "
5384 "set MAC address\nReload the VF driver to "
5385 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005386 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005387 case E1000_VF_SET_PROMISC:
5388 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5389 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005390 case E1000_VF_SET_MULTICAST:
5391 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5392 break;
5393 case E1000_VF_SET_LPE:
5394 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5395 break;
5396 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005397 retval = -1;
5398 if (vf_data->pf_vlan)
5399 dev_warn(&pdev->dev,
5400 "VF %d attempted to override administratively "
5401 "set VLAN tag\nReload the VF driver to "
5402 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005403 else
5404 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005405 break;
5406 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005407 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005408 retval = -1;
5409 break;
5410 }
5411
Alexander Duyckfef45f42009-12-11 22:57:34 -08005412 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5413out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005414 /* notify the VF of the results of what it sent us */
5415 if (retval)
5416 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5417 else
5418 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5419
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005420 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005421}
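/*
 * Editor's note: in the dispatcher above, the low 16 bits of mailbox
 * word 0 select the command (hence the "msgbuf[0] & 0xFFFF" switch),
 * E1000_VT_MSGINFO_MASK covers the per-command count field above
 * them, and the top bits hold the E1000_VT_MSGTYPE_CTS/ACK/NACK
 * status flags that are OR-ed into the reply before igb_write_mbx().
 */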
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005422
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005423static void igb_msg_task(struct igb_adapter *adapter)
5424{
5425 struct e1000_hw *hw = &adapter->hw;
5426 u32 vf;
5427
5428 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5429 /* process any reset requests */
5430 if (!igb_check_for_rst(hw, vf))
5431 igb_vf_reset_event(adapter, vf);
5432
5433 /* process any messages pending */
5434 if (!igb_check_for_msg(hw, vf))
5435 igb_rcv_msg_from_vf(adapter, vf);
5436
5437 /* process any acks */
5438 if (!igb_check_for_ack(hw, vf))
5439 igb_rcv_ack_from_vf(adapter, vf);
5440 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005441}
5442
Auke Kok9d5c8242008-01-24 02:22:38 -08005443/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005444 * igb_set_uta - Set unicast filter table address
5445 * @adapter: board private structure
5446 *
5447 * The unicast table address is a register array of 32-bit registers.
5448	 * The table is meant to be used in a way similar to how the MTA is used;
5449	 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005450 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5451 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005452 **/
5453static void igb_set_uta(struct igb_adapter *adapter)
5454{
5455 struct e1000_hw *hw = &adapter->hw;
5456 int i;
5457
5458 /* The UTA table only exists on 82576 hardware and newer */
5459 if (hw->mac.type < e1000_82576)
5460 return;
5461
5462 /* we only need to do this if VMDq is enabled */
5463 if (!adapter->vfs_allocated_count)
5464 return;
5465
5466 for (i = 0; i < hw->mac.uta_reg_count; i++)
5467 array_wr32(E1000_UTA, i, ~0);
5468}
5469
5470/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005471 * igb_intr_msi - Interrupt Handler
5472 * @irq: interrupt number
5473 * @data: pointer to a network interface device structure
5474 **/
5475static irqreturn_t igb_intr_msi(int irq, void *data)
5476{
Alexander Duyck047e0032009-10-27 15:49:27 +00005477 struct igb_adapter *adapter = data;
5478 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005479 struct e1000_hw *hw = &adapter->hw;
5480 /* read ICR disables interrupts using IAM */
5481 u32 icr = rd32(E1000_ICR);
5482
Alexander Duyck047e0032009-10-27 15:49:27 +00005483 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005484
Alexander Duyck7f081d42010-01-07 17:41:00 +00005485 if (icr & E1000_ICR_DRSTA)
5486 schedule_work(&adapter->reset_task);
5487
Alexander Duyck047e0032009-10-27 15:49:27 +00005488 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005489 /* HW is reporting DMA is out of sync */
5490 adapter->stats.doosync++;
5491 }
5492
Auke Kok9d5c8242008-01-24 02:22:38 -08005493 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5494 hw->mac.get_link_status = 1;
5495 if (!test_bit(__IGB_DOWN, &adapter->state))
5496 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5497 }
5498
Alexander Duyck047e0032009-10-27 15:49:27 +00005499 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005500
5501 return IRQ_HANDLED;
5502}
5503
5504/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005505 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005506 * @irq: interrupt number
5507 * @data: pointer to a network interface device structure
5508 **/
5509static irqreturn_t igb_intr(int irq, void *data)
5510{
Alexander Duyck047e0032009-10-27 15:49:27 +00005511 struct igb_adapter *adapter = data;
5512 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005513 struct e1000_hw *hw = &adapter->hw;
5514 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5515 * need for the IMC write */
5516 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005517
5518 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5519 * not set, then the adapter didn't send an interrupt */
5520 if (!(icr & E1000_ICR_INT_ASSERTED))
5521 return IRQ_NONE;
5522
Alexander Duyck0ba82992011-08-26 07:45:47 +00005523 igb_write_itr(q_vector);
5524
Alexander Duyck7f081d42010-01-07 17:41:00 +00005525 if (icr & E1000_ICR_DRSTA)
5526 schedule_work(&adapter->reset_task);
5527
Alexander Duyck047e0032009-10-27 15:49:27 +00005528 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005529 /* HW is reporting DMA is out of sync */
5530 adapter->stats.doosync++;
5531 }
5532
Auke Kok9d5c8242008-01-24 02:22:38 -08005533 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5534 hw->mac.get_link_status = 1;
5535 /* guard against interrupt when we're going down */
5536 if (!test_bit(__IGB_DOWN, &adapter->state))
5537 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5538 }
5539
Alexander Duyck047e0032009-10-27 15:49:27 +00005540 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005541
5542 return IRQ_HANDLED;
5543}
5544
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00005545static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005546{
Alexander Duyck047e0032009-10-27 15:49:27 +00005547 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005548 struct e1000_hw *hw = &adapter->hw;
5549
Alexander Duyck0ba82992011-08-26 07:45:47 +00005550 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5551 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5552 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5553 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005554 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005555 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005556 }
5557
5558 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5559 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005560 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005561 else
5562 igb_irq_enable(adapter);
5563 }
5564}
5565
Auke Kok9d5c8242008-01-24 02:22:38 -08005566/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005567 * igb_poll - NAPI Rx polling callback
5568 * @napi: napi polling structure
5569 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005570 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005571static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005572{
Alexander Duyck047e0032009-10-27 15:49:27 +00005573 struct igb_q_vector *q_vector = container_of(napi,
5574 struct igb_q_vector,
5575 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005576 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005577
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005578#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005579 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5580 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005581#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005582 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005583 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005584
Alexander Duyck0ba82992011-08-26 07:45:47 +00005585 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005586 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005587
Alexander Duyck16eb8812011-08-26 07:43:54 +00005588 /* If all work not completed, return budget and keep polling */
5589 if (!clean_complete)
5590 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005591
Alexander Duyck46544252009-02-19 20:39:04 -08005592 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005593 napi_complete(napi);
5594 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005595
Alexander Duyck16eb8812011-08-26 07:43:54 +00005596 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005597}
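/*
 * Editor's note: igb_poll() follows the standard NAPI contract.  A
 * minimal driver-independent sketch of that contract; both helpers
 * named example_*() are hypothetical:
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_clean_rings(napi, budget);

	/* more work pending: report full budget so we stay in polling */
	if (done == budget)
		return budget;

	/* work finished: leave polling mode and re-arm interrupts */
	napi_complete(napi);
	example_enable_irqs(napi);
	return done;
}
#endif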
Al Viro6d8126f2008-03-16 22:23:24 +00005598
Richard Cochran7ebae812012-03-16 10:55:37 +00005599#ifdef CONFIG_IGB_PTP
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005600/**
5601 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5602 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck06034642011-08-26 07:44:22 +00005603 * @buffer_info: pointer to igb_tx_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005604 *
5605 * If we were asked to do hardware stamping and such a time stamp is
5606 * available, then it must have been for this skb here because we
5607 * allow only one such packet into the queue.
5608 */
Alexander Duyck06034642011-08-26 07:44:22 +00005609static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5610 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005611{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005612 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005613 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005614 struct skb_shared_hwtstamps shhwtstamps;
5615 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005616
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005617 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005618 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005619 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5620 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005621
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005622 regval = rd32(E1000_TXSTMPL);
5623 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5624
5625 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005626 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005627}
5628
Richard Cochran7ebae812012-03-16 10:55:37 +00005629#endif
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005630/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005631 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005632 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005633 * returns true if ring is completely cleaned
5634 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005635static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005636{
Alexander Duyck047e0032009-10-27 15:49:27 +00005637 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005638 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005639 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005640 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005641 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005642 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005643 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005644
Alexander Duyck13fde972011-10-05 13:35:24 +00005645 if (test_bit(__IGB_DOWN, &adapter->state))
5646 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005647
Alexander Duyck06034642011-08-26 07:44:22 +00005648 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005649 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005650 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005651
Alexander Duyck13fde972011-10-05 13:35:24 +00005652 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005653 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005654
Alexander Duyck8542db02011-08-26 07:44:43 +00005655 /* prevent any other reads prior to eop_desc */
5656 rmb();
5657
5658 /* if next_to_watch is not set then there is no work pending */
5659 if (!eop_desc)
5660 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005661
5662 /* if DD is not set pending work has not been completed */
5663 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5664 break;
5665
Alexander Duyck8542db02011-08-26 07:44:43 +00005666 /* clear next_to_watch to prevent false hangs */
5667 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005668
Alexander Duyckebe42d12011-08-26 07:45:09 +00005669 /* update the statistics for this packet */
5670 total_bytes += tx_buffer->bytecount;
5671 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005672
Richard Cochran7ebae812012-03-16 10:55:37 +00005673#ifdef CONFIG_IGB_PTP
Alexander Duyckebe42d12011-08-26 07:45:09 +00005674 /* retrieve hardware timestamp */
5675 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005676
Richard Cochran7ebae812012-03-16 10:55:37 +00005677#endif
Alexander Duyckebe42d12011-08-26 07:45:09 +00005678 /* free the skb */
5679 dev_kfree_skb_any(tx_buffer->skb);
5680 tx_buffer->skb = NULL;
5681
5682 /* unmap skb header data */
5683 dma_unmap_single(tx_ring->dev,
5684 tx_buffer->dma,
5685 tx_buffer->length,
5686 DMA_TO_DEVICE);
5687
5688 /* clear last DMA location and unmap remaining buffers */
5689 while (tx_desc != eop_desc) {
5690 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005691
Alexander Duyck13fde972011-10-05 13:35:24 +00005692 tx_buffer++;
5693 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005694 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005695 if (unlikely(!i)) {
5696 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005697 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005698 tx_desc = IGB_TX_DESC(tx_ring, 0);
5699 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005700
5701 /* unmap any remaining paged data */
5702 if (tx_buffer->dma) {
5703 dma_unmap_page(tx_ring->dev,
5704 tx_buffer->dma,
5705 tx_buffer->length,
5706 DMA_TO_DEVICE);
5707 }
5708 }
5709
5710 /* clear last DMA location */
5711 tx_buffer->dma = 0;
5712
5713 /* move us one more past the eop_desc for start of next pkt */
5714 tx_buffer++;
5715 tx_desc++;
5716 i++;
5717 if (unlikely(!i)) {
5718 i -= tx_ring->count;
5719 tx_buffer = tx_ring->tx_buffer_info;
5720 tx_desc = IGB_TX_DESC(tx_ring, 0);
5721 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005722 }
5723
Eric Dumazetbdbc0632012-01-04 20:23:36 +00005724 netdev_tx_completed_queue(txring_txq(tx_ring),
5725 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00005726 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005727 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005728 u64_stats_update_begin(&tx_ring->tx_syncp);
5729 tx_ring->tx_stats.bytes += total_bytes;
5730 tx_ring->tx_stats.packets += total_packets;
5731 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005732 q_vector->tx.total_bytes += total_bytes;
5733 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005734
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005735 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005736 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005737
Alexander Duyck8542db02011-08-26 07:44:43 +00005738 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005739
Auke Kok9d5c8242008-01-24 02:22:38 -08005740		/* Detect a transmit hang in hardware; this serializes the
5741 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005742 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyck8542db02011-08-26 07:44:43 +00005743 if (eop_desc &&
5744 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005745 (adapter->tx_timeout_factor * HZ)) &&
5746 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005747
Auke Kok9d5c8242008-01-24 02:22:38 -08005748 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005749 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005750 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005751 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005752 " TDH <%x>\n"
5753 " TDT <%x>\n"
5754 " next_to_use <%x>\n"
5755 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005756 "buffer_info[next_to_clean]\n"
5757 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005758 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005759 " jiffies <%lx>\n"
5760 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005761 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005762 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005763 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005764 tx_ring->next_to_use,
5765 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005766 tx_buffer->time_stamp,
5767 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005768 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005769 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005770 netif_stop_subqueue(tx_ring->netdev,
5771 tx_ring->queue_index);
5772
5773 /* we are about to reset, no point in enabling stuff */
5774 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005775 }
5776 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005777
5778 if (unlikely(total_packets &&
5779 netif_carrier_ok(tx_ring->netdev) &&
5780 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5781 /* Make sure that anybody stopping the queue after this
5782 * sees the new next_to_clean.
5783 */
5784 smp_mb();
5785 if (__netif_subqueue_stopped(tx_ring->netdev,
5786 tx_ring->queue_index) &&
5787 !(test_bit(__IGB_DOWN, &adapter->state))) {
5788 netif_wake_subqueue(tx_ring->netdev,
5789 tx_ring->queue_index);
5790
5791 u64_stats_update_begin(&tx_ring->tx_syncp);
5792 tx_ring->tx_stats.restart_queue++;
5793 u64_stats_update_end(&tx_ring->tx_syncp);
5794 }
5795 }
5796
5797 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005798}
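/*
 * Editor's note: the "i -= tx_ring->count" idiom above biases the
 * ring index negative so the wrap check is a cheap "unlikely(!i)"
 * instead of a compare against count on every step.  Worked example
 * with count = 8 and next_to_clean = 5: i starts at 5 - 8 = -3,
 * advances through -2 and -1, reaches 0 and is rewound by 8 to -8
 * (descriptor 0 again); adding count back at the end recovers the
 * real next_to_clean.
 */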
5799
Alexander Duyckcd392f52011-08-26 07:43:59 +00005800static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005801 union e1000_adv_rx_desc *rx_desc,
5802 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005803{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005804 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005805
Alexander Duyck294e7d72011-08-26 07:45:57 +00005806 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005807 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00005808 return;
5809
5810 /* Rx checksum disabled via ethtool */
5811 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005812 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005813
Auke Kok9d5c8242008-01-24 02:22:38 -08005814 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005815 if (igb_test_staterr(rx_desc,
5816 E1000_RXDEXT_STATERR_TCPE |
5817 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005818 /*
5819 * work around errata with sctp packets where the TCPE aka
5820 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5821		 * packets (i.e. let the stack check the crc32c)
5822 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005823 if (!((skb->len == 60) &&
5824 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005825 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005826 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005827 u64_stats_update_end(&ring->rx_syncp);
5828 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005829 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005830 return;
5831 }
5832 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005833 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5834 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08005835 skb->ip_summed = CHECKSUM_UNNECESSARY;
5836
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005837 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5838 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08005839}
5840
Alexander Duyck077887c2011-08-26 07:46:29 +00005841static inline void igb_rx_hash(struct igb_ring *ring,
5842 union e1000_adv_rx_desc *rx_desc,
5843 struct sk_buff *skb)
5844{
5845 if (ring->netdev->features & NETIF_F_RXHASH)
5846 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5847}
5848
Richard Cochran7ebae812012-03-16 10:55:37 +00005849#ifdef CONFIG_IGB_PTP
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005850static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5851 union e1000_adv_rx_desc *rx_desc,
5852 struct sk_buff *skb)
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005853{
5854 struct igb_adapter *adapter = q_vector->adapter;
5855 struct e1000_hw *hw = &adapter->hw;
5856 u64 regval;
5857
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005858 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
5859 E1000_RXDADV_STAT_TS))
5860 return;
5861
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005862 /*
5863 * If this bit is set, then the RX registers contain the time stamp. No
5864 * other packet will be time stamped until we read these registers, so
5865 * read the registers to make them available again. Because only one
5866 * packet can be time stamped at a time, we know that the register
5867 * values must belong to this one here and therefore we don't need to
5868 * compare any of the additional attributes stored for it.
5869 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005870 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005871 * can turn into a skb_shared_hwtstamps.
5872 */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005873 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
Nick Nunley757b77e2010-03-26 11:36:47 +00005874 u32 *stamp = (u32 *)skb->data;
5875 regval = le32_to_cpu(*(stamp + 2));
5876 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5877 skb_pull(skb, IGB_TS_HDR_LEN);
5878 } else {
5879		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5880 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005881
Nick Nunley757b77e2010-03-26 11:36:47 +00005882 regval = rd32(E1000_RXSTMPL);
5883 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5884 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005885
5886 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5887}
Alexander Duyck8be10e92011-08-26 07:47:11 +00005888
Richard Cochran7ebae812012-03-16 10:55:37 +00005889#endif
Alexander Duyck8be10e92011-08-26 07:47:11 +00005890static void igb_rx_vlan(struct igb_ring *ring,
5891 union e1000_adv_rx_desc *rx_desc,
5892 struct sk_buff *skb)
5893{
5894 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5895 u16 vid;
5896 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5897 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5898 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5899 else
5900 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5901
5902 __vlan_hwaccel_put_tag(skb, vid);
5903 }
5904}
5905
Alexander Duyck44390ca2011-08-26 07:43:38 +00005906static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005907{
5908 /* HW will not DMA in data larger than the given buffer, even if it
5909 * parses the (NFS, of course) header to be larger. In that case, it
5910 * fills the header buffer and spills the rest into the page.
5911 */
5912 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5913 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00005914 if (hlen > IGB_RX_HDR_LEN)
5915 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005916 return hlen;
5917}
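/*
 * Editor's note: a worked example of the extraction above.  With
 * E1000_RXDADV_HDRBUFLEN_MASK = 0x7FE0 and a shift of 5, a hdr_info
 * of 0x0883 gives (0x0883 & 0x7FE0) >> 5 = 0x880 >> 5 = 68, i.e. a
 * 68-byte header, which is then clamped to IGB_RX_HDR_LEN.
 */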
5918
Alexander Duyckcd392f52011-08-26 07:43:59 +00005919static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005920{
Alexander Duyck0ba82992011-08-26 07:45:47 +00005921 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005922 union e1000_adv_rx_desc *rx_desc;
5923 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005924 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005925 u16 cleaned_count = igb_desc_unused(rx_ring);
5926 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005927
Alexander Duyck601369062011-08-26 07:44:05 +00005928 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005929
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005930 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
Alexander Duyck06034642011-08-26 07:44:22 +00005931 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00005932 struct sk_buff *skb = buffer_info->skb;
5933 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005934
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005935 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005936 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005937
5938 i++;
5939 if (i == rx_ring->count)
5940 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005941
Alexander Duyck601369062011-08-26 07:44:05 +00005942 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005943 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005944
Alexander Duyck16eb8812011-08-26 07:43:54 +00005945 /*
5946 * This memory barrier is needed to keep us from reading
5947 * any other fields out of the rx_desc until we know the
5948 * RXD_STAT_DD bit is set
5949 */
5950 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005951
Alexander Duyck16eb8812011-08-26 07:43:54 +00005952 if (!skb_is_nonlinear(skb)) {
5953 __skb_put(skb, igb_get_hlen(rx_desc));
5954 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00005955 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00005956 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00005957 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005958 }
5959
Alexander Duyck16eb8812011-08-26 07:43:54 +00005960 if (rx_desc->wb.upper.length) {
5961 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005962
Koki Sanagiaa913402010-04-27 01:01:19 +00005963 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005964 buffer_info->page,
5965 buffer_info->page_offset,
5966 length);
5967
Alexander Duyck16eb8812011-08-26 07:43:54 +00005968 skb->len += length;
5969 skb->data_len += length;
Eric Dumazet95b9c1d2011-10-13 07:56:41 +00005970 skb->truesize += PAGE_SIZE / 2;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005971
Alexander Duyckd1eff352009-11-12 18:38:35 +00005972 if ((page_count(buffer_info->page) != 1) ||
5973 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005974 buffer_info->page = NULL;
5975 else
5976 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08005977
Alexander Duyck16eb8812011-08-26 07:43:54 +00005978 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
5979 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5980 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005981 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005982
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005983 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00005984 struct igb_rx_buffer *next_buffer;
5985 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08005986 buffer_info->skb = next_buffer->skb;
5987 buffer_info->dma = next_buffer->dma;
5988 next_buffer->skb = skb;
5989 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005990 goto next_desc;
5991 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00005992
Ben Greear89eaefb2012-03-06 09:41:58 +00005993 if (unlikely((igb_test_staterr(rx_desc,
5994 E1000_RXDEXT_ERR_FRAME_ERR_MASK))
5995 && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00005996 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005997 goto next_desc;
5998 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005999
Richard Cochran7ebae812012-03-16 10:55:37 +00006000#ifdef CONFIG_IGB_PTP
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006001 igb_rx_hwtstamp(q_vector, rx_desc, skb);
Richard Cochran7ebae812012-03-16 10:55:37 +00006002#endif
Alexander Duyck077887c2011-08-26 07:46:29 +00006003 igb_rx_hash(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006004 igb_rx_checksum(rx_ring, rx_desc, skb);
Alexander Duyck8be10e92011-08-26 07:47:11 +00006005 igb_rx_vlan(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006006
6007 total_bytes += skb->len;
6008 total_packets++;
6009
6010 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6011
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006012 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006013
Alexander Duyck16eb8812011-08-26 07:43:54 +00006014 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08006015next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00006016 if (!budget)
6017 break;
6018
6019 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006020 /* return some buffers to hardware, one at a time is too slow */
6021 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00006022 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006023 cleaned_count = 0;
6024 }
6025
6026 /* use prefetched values */
6027 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006028 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006029
Auke Kok9d5c8242008-01-24 02:22:38 -08006030 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006031 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006032 rx_ring->rx_stats.packets += total_packets;
6033 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006034 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006035 q_vector->rx.total_packets += total_packets;
6036 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006037
6038 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006039 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006040
Alexander Duyck16eb8812011-08-26 07:43:54 +00006041 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006042}
6043
Alexander Duyckc023cd82011-08-26 07:43:43 +00006044static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006045 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006046{
6047 struct sk_buff *skb = bi->skb;
6048 dma_addr_t dma = bi->dma;
6049
6050 if (dma)
6051 return true;
6052
6053 if (likely(!skb)) {
6054 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6055 IGB_RX_HDR_LEN);
6056 bi->skb = skb;
6057 if (!skb) {
6058 rx_ring->rx_stats.alloc_failed++;
6059 return false;
6060 }
6061
6062 /* initialize skb for ring */
6063 skb_record_rx_queue(skb, rx_ring->queue_index);
6064 }
6065
6066 dma = dma_map_single(rx_ring->dev, skb->data,
6067 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6068
6069 if (dma_mapping_error(rx_ring->dev, dma)) {
6070 rx_ring->rx_stats.alloc_failed++;
6071 return false;
6072 }
6073
6074 bi->dma = dma;
6075 return true;
6076}
6077
6078static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006079 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006080{
6081 struct page *page = bi->page;
6082 dma_addr_t page_dma = bi->page_dma;
6083 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6084
6085 if (page_dma)
6086 return true;
6087
6088 if (!page) {
Eric Dumazet1f2149c2011-11-22 10:57:41 +00006089 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006090 bi->page = page;
6091 if (unlikely(!page)) {
6092 rx_ring->rx_stats.alloc_failed++;
6093 return false;
6094 }
6095 }
6096
6097 page_dma = dma_map_page(rx_ring->dev, page,
6098 page_offset, PAGE_SIZE / 2,
6099 DMA_FROM_DEVICE);
6100
6101 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6102 rx_ring->rx_stats.alloc_failed++;
6103 return false;
6104 }
6105
6106 bi->page_dma = page_dma;
6107 bi->page_offset = page_offset;
6108 return true;
6109}
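/*
 * Editor's note: the "bi->page_offset ^ (PAGE_SIZE / 2)" above flips
 * between the two halves of a page on each refill, so one page backs
 * two receive buffers before the page_count()/page_to_nid() checks in
 * igb_clean_rx_irq() force a replacement.  With 4 KiB pages the
 * offset simply alternates 0 -> 2048 -> 0.
 */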
6110
Auke Kok9d5c8242008-01-24 02:22:38 -08006111/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006112 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08006113 * @rx_ring: pointer to the receive ring to refill
 * @cleaned_count: number of descriptors/buffers to refresh
6114 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006115void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006116{
Auke Kok9d5c8242008-01-24 02:22:38 -08006117 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006118 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006119 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006120
Alexander Duyck601369062011-08-26 07:44:05 +00006121 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006122 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006123 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006124
6125 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006126 if (!igb_alloc_mapped_skb(rx_ring, bi))
6127 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006128
Alexander Duyckc023cd82011-08-26 07:43:43 +00006129 /* Refresh the desc even if buffer_addrs didn't change
6130 * because each write-back erases this info. */
6131 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006132
Alexander Duyckc023cd82011-08-26 07:43:43 +00006133 if (!igb_alloc_mapped_page(rx_ring, bi))
6134 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006135
Alexander Duyckc023cd82011-08-26 07:43:43 +00006136 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006137
Alexander Duyckc023cd82011-08-26 07:43:43 +00006138 rx_desc++;
6139 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006140 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006141 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006142 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006143 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006144 i -= rx_ring->count;
6145 }
6146
6147 /* clear the hdr_addr for the next_to_use descriptor */
6148 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006149 }
6150
Alexander Duyckc023cd82011-08-26 07:43:43 +00006151 i += rx_ring->count;
6152
Auke Kok9d5c8242008-01-24 02:22:38 -08006153 if (rx_ring->next_to_use != i) {
6154 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006155
6156 /* Force memory writes to complete before letting h/w
6157 * know there are new descriptors to fetch. (Only
6158 * applicable for weak-ordered memory model archs,
6159 * such as IA-64). */
6160 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006161 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006162 }
6163}
6164
6165/**
6166 * igb_mii_ioctl - read or write PHY registers via the MII ioctls
6167 * @netdev: network interface device structure
6168 * @ifr: interface request holding the mii_ioctl_data
6169 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
6170 **/
6171static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6172{
6173 struct igb_adapter *adapter = netdev_priv(netdev);
6174 struct mii_ioctl_data *data = if_mii(ifr);
6175
6176 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6177 return -EOPNOTSUPP;
6178
6179 switch (cmd) {
6180 case SIOCGMIIPHY:
6181 data->phy_id = adapter->hw.phy.addr;
6182 break;
6183 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006184 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6185 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006186 return -EIO;
6187 break;
6188 case SIOCSMIIREG:
6189 default:
6190 return -EOPNOTSUPP;
6191 }
6192 return 0;
6193}
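/*
 * Editor's note: a hedged userspace sketch of exercising the handler
 * above through the standard MII ioctls (definitions from
 * <linux/mii.h> and <linux/sockios.h>; error handling elided):
 *
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result lands in mii->val_out
 */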
6194
6195/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006196 * igb_hwtstamp_ioctl - control hardware time stamping
6197 * @netdev: network interface device structure
6198 * @ifr: interface request carrying a struct hwtstamp_config
6199 * @cmd: SIOCSHWTSTAMP
6200 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006201 * Outgoing time stamping can be enabled and disabled. Play nice and
6202 * disable it when requested, although it shouldn't cause any overhead
6203 * when no packet needs it. At most one packet in the queue may be
6204 * marked for time stamping, otherwise it would be impossible to tell
6205 * for sure to which packet the hardware time stamp belongs.
6206 *
6207 * Incoming time stamping has to be configured via the hardware
6208 * filters. Not all combinations are supported, in particular event
6209 * type has to be specified. Matching the kind of event packet is
6210 * not supported, with the exception of "all V2 events regardless of
6211 * level 2 or 4".
6212 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006213 **/
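/*
 * Editor's note: a hedged sketch of the matching userspace request
 * (structures from <linux/net_tstamp.h>; error handling elided):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {0};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	// on return cfg.rx_filter may be widened, e.g. to FILTER_ALL
 */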
6214static int igb_hwtstamp_ioctl(struct net_device *netdev,
6215 struct ifreq *ifr, int cmd)
6216{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006217 struct igb_adapter *adapter = netdev_priv(netdev);
6218 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006219 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006220 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6221 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006222 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006223 bool is_l4 = false;
6224 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006225 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006226
6227 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6228 return -EFAULT;
6229
6230 /* reserved for future extensions */
6231 if (config.flags)
6232 return -EINVAL;
6233
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006234 switch (config.tx_type) {
6235 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006236 tsync_tx_ctl = 0;
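		/* fall through - OFF clears the enable bit, ON keeps it */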
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl || tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

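/* PTP event messages (Sync, Delay_Req) arrive on UDP port 319; general
 * messages use port 320 and are not time stamped.
 */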
#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
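
/*
 * For reference, a minimal userspace sketch that exercises the path
 * above through the SIOCSHWTSTAMP ioctl (not part of the driver; the
 * interface name "eth0" and the open socket fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */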

/**
 * igb_ioctl - handle device-specific ioctl calls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

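/**
 * igb_read_pcie_cap_reg - read a register from the PCIe capability block
 * @hw: pointer to the HW structure
 * @reg: offset of the register within the PCIe capability
 * @value: where to store the register contents
 *
 * Returns -E1000_ERR_CONFIG if the device has no PCIe capability.
 **/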
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

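/**
 * igb_write_pcie_cap_reg - write a register in the PCIe capability block
 * @hw: pointer to the HW structure
 * @reg: offset of the register within the PCIe capability
 * @value: value to write
 *
 * Returns -E1000_ERR_CONFIG if the device has no PCIe capability.
 **/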
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

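/**
 * igb_vlan_mode - enable or disable hardware VLAN tag insert/strip
 * @netdev: network interface device structure
 * @features: netdev features; NETIF_F_HW_VLAN_RX selects stripping
 **/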
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);
}

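/**
 * igb_vlan_rx_add_vid - add a VLAN id to the hardware filter tables
 * @netdev: network interface device structure
 * @vid: VLAN id to add to the VLVF array and VFTA table
 **/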
static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

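/**
 * igb_set_spd_dplx - force a specific speed/duplex setting
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dplx: requested duplex (DUPLEX_HALF or DUPLEX_FULL)
 *
 * Returns -EINVAL for combinations the hardware cannot support.
 **/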
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

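/**
 * __igb_shutdown - common suspend/hibernate/poweroff preparation
 * @pdev: PCI device information struct
 * @enable_wake: set on return if wake-up should remain armed
 * @runtime: true on the runtime-PM path, where only link-change
 *	wake-ups are requested
 **/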
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = __igb_shutdown(pdev, &wake, false);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static int igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (!rtnl_is_locked()) {
		/*
		 * shut up ASSERT_RTNL() warning in
		 * netif_set_real_num_tx/rx_queues.
		 */
		rtnl_lock();
		err = igb_init_interrupt_scheme(adapter);
		rtnl_unlock();
	} else {
		err = igb_init_interrupt_scheme(adapter);
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netdev->flags & IFF_UP) {
		err = __igb_open(netdev, true);
		if (err)
			return err;
	}

	netif_device_attach(netdev);
	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake, true);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->msix_entries)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

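/**
 * igb_rar_set_qsel - write a MAC address into a receive address register
 * @adapter: board private structure
 * @addr: MAC address, in network (big endian) byte order
 * @index: receive address register to program
 * @qsel: pool/queue to associate with the entry
 **/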
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev,
		 "Reload the VF driver to make this change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF MAC address has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

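/*
 * Program the per-VF transmit rate limiter (82576). The hardware takes
 * the rate as a factor of link_speed / tx_rate split into an integer and
 * a fractional part. Worked example, assuming the usual 14-bit fraction
 * field (E1000_RTTBCNRC_RF_INT_SHIFT == 14): link_speed = 1000 and
 * tx_rate = 300 give rf_int = 3 and rf_dec = 100 * 16384 / 300 = 5461,
 * i.e. a factor of 3 + 5461/16384 ~= 3.333 = 1000/300.
 */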
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			       E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

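/**
 * igb_vmm_control - configure virtual machine traffic handling
 * @adapter: board private structure
 *
 * Applies per-MAC-type VLAN tag fixups, then enables loopback,
 * replication and anti-spoofing when VFs are allocated. Replication
 * is not supported on 82575.
 **/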
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

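/**
 * igb_init_dmac - configure DMA coalescing (parts newer than 82580)
 * @adapter: board private structure
 * @pba: packet buffer allocation, in KB
 *
 * As a worked example of the high water mark arithmetic below, with
 * pba = 34 (KB) and a 1522 byte max frame: hwm = 64 * 34 - 1522 / 16 =
 * 2176 - 95 = 2081 (in 16B units), which is above the floor of
 * 64 * (34 - 6) = 1792 and is therefore used as-is.
 **/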
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/*
			 * DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, with a floor of PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/*
			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, with a floor of PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = approx. 1000 usec in 32 usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/*
			 * no lower threshold to disable
			 * coalescing (smart FIFO) - UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/*
			 * free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/*
			 * make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}

/* igb_main.c */