/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 2
#define BUILD 10
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
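/* With the MAJ/MIN/BUILD values above, DRV_VERSION evaluates to "3.2.10-k". */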
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24              15      0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng ntw timestamp "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type >= e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
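/* Q_IDX_82576 interleaves ring indices across the two halves of the 82576
 * queue space: Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1,
 * Q_IDX_82576(3) = 9, and so on.
 */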
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset within IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
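		/* e.g. Rx queue 2 contributes bit E1000_EICR_RX_QUEUE0 << 2
		   to the bitmap written to MSIXBM for this vector. */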
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
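		/* e.g. rx_queue 10 maps to row (10 & 0x7) = 2 with column
		 * offset ((10 & 0x8) << 1) = 16.
		 */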
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
856 igb_write_ivar(hw, msix_vector,
857 rx_queue >> 1,
858 (rx_queue & 0x1) << 4);
859 if (tx_queue > IGB_N0_QUEUE)
860 igb_write_ivar(hw, msix_vector,
861 tx_queue >> 1,
862 ((tx_queue & 0x1) << 4) + 8);
Alexander Duyck55cac242009-11-19 12:42:21 +0000863 q_vector->eims_value = 1 << msix_vector;
864 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -0700865 default:
866 BUG();
867 break;
868 }
Alexander Duyck26b39272010-02-17 01:00:41 +0000869
870 /* add q_vector eims value to global eims_enable_mask */
871 adapter->eims_enable_mask |= q_vector->eims_value;
872
873 /* configure q_vector to set itr on first interrupt */
874 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -0800875}
876
877/**
878 * igb_configure_msix - Configure MSI-X hardware
879 *
880 * igb_configure_msix sets up the hardware to properly
881 * generate MSI-X interrupts.
882 **/
883static void igb_configure_msix(struct igb_adapter *adapter)
884{
885 u32 tmp;
886 int i, vector = 0;
887 struct e1000_hw *hw = &adapter->hw;
888
889 adapter->eims_enable_mask = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -0800890
891 /* set vector for other causes, i.e. link changes */
Alexander Duyck2d064c02008-07-08 15:10:12 -0700892 switch (hw->mac.type) {
893 case e1000_82575:
Auke Kok9d5c8242008-01-24 02:22:38 -0800894 tmp = rd32(E1000_CTRL_EXT);
895 /* enable MSI-X PBA support*/
896 tmp |= E1000_CTRL_EXT_PBA_CLR;
897
898 /* Auto-Mask interrupts upon ICR read. */
899 tmp |= E1000_CTRL_EXT_EIAME;
900 tmp |= E1000_CTRL_EXT_IRCA;
901
902 wr32(E1000_CTRL_EXT, tmp);
Alexander Duyck047e0032009-10-27 15:49:27 +0000903
904 /* enable msix_other interrupt */
905 array_wr32(E1000_MSIXBM(0), vector++,
906 E1000_EIMS_OTHER);
PJ Waskiewicz844290e2008-06-27 11:00:39 -0700907 adapter->eims_other = E1000_EIMS_OTHER;
Auke Kok9d5c8242008-01-24 02:22:38 -0800908
Alexander Duyck2d064c02008-07-08 15:10:12 -0700909 break;
910
911 case e1000_82576:
Alexander Duyck55cac242009-11-19 12:42:21 +0000912 case e1000_82580:
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +0000913 case e1000_i350:
Alexander Duyck047e0032009-10-27 15:49:27 +0000914 /* Turn on MSI-X capability first, or our settings
915 * won't stick. And it will take days to debug. */
916 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
917 E1000_GPIE_PBA | E1000_GPIE_EIAME |
918 E1000_GPIE_NSICR);
Alexander Duyck2d064c02008-07-08 15:10:12 -0700919
Alexander Duyck047e0032009-10-27 15:49:27 +0000920 /* enable msix_other interrupt */
921 adapter->eims_other = 1 << vector;
922 tmp = (vector++ | E1000_IVAR_VALID) << 8;
923
924 wr32(E1000_IVAR_MISC, tmp);
Alexander Duyck2d064c02008-07-08 15:10:12 -0700925 break;
926 default:
927 /* do nothing, since nothing else supports MSI-X */
928 break;
929 } /* switch (hw->mac.type) */
Alexander Duyck047e0032009-10-27 15:49:27 +0000930
931 adapter->eims_enable_mask |= adapter->eims_other;
932
Alexander Duyck26b39272010-02-17 01:00:41 +0000933 for (i = 0; i < adapter->num_q_vectors; i++)
934 igb_assign_vector(adapter->q_vector[i], vector++);
Alexander Duyck047e0032009-10-27 15:49:27 +0000935
Auke Kok9d5c8242008-01-24 02:22:38 -0800936 wrfl();
937}
938
939/**
940 * igb_request_msix - Initialize MSI-X interrupts
941 *
942 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
943 * kernel.
944 **/
945static int igb_request_msix(struct igb_adapter *adapter)
946{
947 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +0000948 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -0800949 int i, err = 0, vector = 0;
950
Auke Kok9d5c8242008-01-24 02:22:38 -0800951 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchesa0607fd2009-11-18 23:29:17 -0800952 igb_msix_other, 0, netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -0800953 if (err)
954 goto out;
Alexander Duyck047e0032009-10-27 15:49:27 +0000955 vector++;
956
957 for (i = 0; i < adapter->num_q_vectors; i++) {
958 struct igb_q_vector *q_vector = adapter->q_vector[i];
959
960 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
961
Alexander Duyck0ba82992011-08-26 07:45:47 +0000962 if (q_vector->rx.ring && q_vector->tx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000963 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000964 q_vector->rx.ring->queue_index);
965 else if (q_vector->tx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000966 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000967 q_vector->tx.ring->queue_index);
968 else if (q_vector->rx.ring)
Alexander Duyck047e0032009-10-27 15:49:27 +0000969 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
Alexander Duyck0ba82992011-08-26 07:45:47 +0000970 q_vector->rx.ring->queue_index);
Alexander Duyck047e0032009-10-27 15:49:27 +0000971 else
972 sprintf(q_vector->name, "%s-unused", netdev->name);
973
974 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchesa0607fd2009-11-18 23:29:17 -0800975 igb_msix_ring, 0, q_vector->name,
Alexander Duyck047e0032009-10-27 15:49:27 +0000976 q_vector);
977 if (err)
978 goto out;
979 vector++;
980 }
Auke Kok9d5c8242008-01-24 02:22:38 -0800981
Auke Kok9d5c8242008-01-24 02:22:38 -0800982 igb_configure_msix(adapter);
983 return 0;
984out:
985 return err;
986}
987
988static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
989{
990 if (adapter->msix_entries) {
991 pci_disable_msix(adapter->pdev);
992 kfree(adapter->msix_entries);
993 adapter->msix_entries = NULL;
Alexander Duyck047e0032009-10-27 15:49:27 +0000994 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
Auke Kok9d5c8242008-01-24 02:22:38 -0800995 pci_disable_msi(adapter->pdev);
Alexander Duyck047e0032009-10-27 15:49:27 +0000996 }
Auke Kok9d5c8242008-01-24 02:22:38 -0800997}
998
Alexander Duyck047e0032009-10-27 15:49:27 +0000999/**
1000 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1001 * @adapter: board private structure to initialize
1002 *
1003 * This function frees the memory allocated to the q_vectors. In addition if
1004 * NAPI is enabled it will delete any references to the NAPI struct prior
1005 * to freeing the q_vector.
1006 **/
1007static void igb_free_q_vectors(struct igb_adapter *adapter)
1008{
1009 int v_idx;
1010
1011 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1012 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1013 adapter->q_vector[v_idx] = NULL;
Nick Nunleyfe0592b2010-02-17 01:05:35 +00001014 if (!q_vector)
1015 continue;
Alexander Duyck047e0032009-10-27 15:49:27 +00001016 netif_napi_del(&q_vector->napi);
1017 kfree(q_vector);
1018 }
1019 adapter->num_q_vectors = 0;
1020}
1021
1022/**
1023 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1024 *
1025 * This function resets the device so that it has 0 rx queues, tx queues, and
1026 * MSI-X interrupts allocated.
1027 */
1028static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1029{
1030 igb_free_queues(adapter);
1031 igb_free_q_vectors(adapter);
1032 igb_reset_interrupt_capability(adapter);
1033}
Auke Kok9d5c8242008-01-24 02:22:38 -08001034
1035/**
1036 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1037 *
1038 * Attempt to configure interrupts using the best available
1039 * capabilities of the hardware and kernel.
1040 **/
Ben Hutchings21adef32010-09-27 08:28:39 +00001041static int igb_set_interrupt_capability(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08001042{
1043 int err;
1044 int numvecs, i;
1045
Alexander Duyck83b71802009-02-06 23:15:45 +00001046 /* Number of supported queues. */
Alexander Duycka99955f2009-11-12 18:37:19 +00001047 adapter->num_rx_queues = adapter->rss_queues;
Greg Rose5fa85172010-07-01 13:38:16 +00001048 if (adapter->vfs_allocated_count)
1049 adapter->num_tx_queues = 1;
1050 else
1051 adapter->num_tx_queues = adapter->rss_queues;
Alexander Duyck83b71802009-02-06 23:15:45 +00001052
Alexander Duyck047e0032009-10-27 15:49:27 +00001053 /* start with one vector for every rx queue */
1054 numvecs = adapter->num_rx_queues;
1055
Daniel Mack3ad2f3f2010-02-03 08:01:28 +08001056 /* if tx handler is separate add 1 for every tx queue */
Alexander Duycka99955f2009-11-12 18:37:19 +00001057 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1058 numvecs += adapter->num_tx_queues;
Alexander Duyck047e0032009-10-27 15:49:27 +00001059
1060 /* store the number of vectors reserved for queues */
1061 adapter->num_q_vectors = numvecs;
1062
1063 /* add 1 vector for link status interrupts */
1064 numvecs++;
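	/* e.g. 4 Rx queues and 4 Tx queues without queue pairing gives
	 * numvecs = 4 + 4 + 1 = 9 requested MSI-X vectors. */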
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
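 * For example, with 2 Rx queues, 2 Tx queues and only 2 q_vectors, each
 * vector services one Rx ring and one Tx ring.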
1195 **/
1196static int igb_map_ring_to_vector(struct igb_adapter *adapter)
1197{
1198 int i;
1199 int v_idx = 0;
1200
1201 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
1202 (adapter->num_q_vectors < adapter->num_tx_queues))
1203 return -ENOMEM;
1204
1205 if (adapter->num_q_vectors >=
1206 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1207 for (i = 0; i < adapter->num_rx_queues; i++)
1208 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1209 for (i = 0; i < adapter->num_tx_queues; i++)
1210 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1211 } else {
1212 for (i = 0; i < adapter->num_rx_queues; i++) {
1213 if (i < adapter->num_tx_queues)
1214 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1215 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1216 }
1217 for (; i < adapter->num_tx_queues; i++)
1218 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1219 }
1220 return 0;
1221}
1222
1223/**
1224 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1225 *
1226 * This function initializes the interrupts and allocates all of the queues.
1227 **/
1228static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1229{
1230 struct pci_dev *pdev = adapter->pdev;
1231 int err;
1232
Ben Hutchings21adef32010-09-27 08:28:39 +00001233 err = igb_set_interrupt_capability(adapter);
1234 if (err)
1235 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001236
1237 err = igb_alloc_q_vectors(adapter);
1238 if (err) {
1239 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1240 goto err_alloc_q_vectors;
1241 }
1242
1243 err = igb_alloc_queues(adapter);
1244 if (err) {
1245 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1246 goto err_alloc_queues;
1247 }
1248
1249 err = igb_map_ring_to_vector(adapter);
1250 if (err) {
1251 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1252 goto err_map_queues;
1253 }
 1254
1256 return 0;
1257err_map_queues:
1258 igb_free_queues(adapter);
1259err_alloc_queues:
1260 igb_free_q_vectors(adapter);
1261err_alloc_q_vectors:
1262 igb_reset_interrupt_capability(adapter);
1263 return err;
1264}
1265
1266/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001267 * igb_request_irq - initialize interrupts
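 * @adapter: board private structure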
1268 *
1269 * Attempts to configure interrupts using the best available
1270 * capabilities of the hardware and kernel.
1271 **/
1272static int igb_request_irq(struct igb_adapter *adapter)
1273{
1274 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001275 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001276 int err = 0;
1277
1278 if (adapter->msix_entries) {
1279 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001280 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001281 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001282 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001283 igb_clear_interrupt_scheme(adapter);
Alexander Duyckc74d5882011-08-26 07:46:45 +00001284 if (!pci_enable_msi(pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001285 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001286 igb_free_all_tx_resources(adapter);
1287 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001288 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001289 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001290 adapter->num_q_vectors = 1;
1291 err = igb_alloc_q_vectors(adapter);
1292 if (err) {
1293 dev_err(&pdev->dev,
1294 "Unable to allocate memory for vectors\n");
1295 goto request_done;
1296 }
1297 err = igb_alloc_queues(adapter);
1298 if (err) {
1299 dev_err(&pdev->dev,
1300 "Unable to allocate memory for queues\n");
1301 igb_free_q_vectors(adapter);
1302 goto request_done;
1303 }
1304 igb_setup_all_tx_resources(adapter);
1305 igb_setup_all_rx_resources(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001306 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001307
Alexander Duyckc74d5882011-08-26 07:46:45 +00001308 igb_assign_vector(adapter->q_vector[0], 0);
1309
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001310 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Alexander Duyckc74d5882011-08-26 07:46:45 +00001311 err = request_irq(pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001312 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001313 if (!err)
1314 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001315
Auke Kok9d5c8242008-01-24 02:22:38 -08001316 /* fall back to legacy interrupts */
1317 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001318 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001319 }
1320
Alexander Duyckc74d5882011-08-26 07:46:45 +00001321 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001322 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001323
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001324 if (err)
Alexander Duyckc74d5882011-08-26 07:46:45 +00001325 dev_err(&pdev->dev, "Error %d getting interrupt\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08001326 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001327
1328request_done:
1329 return err;
1330}
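/*
 * Summary of the fallback ladder implemented above (no new behavior):
 * MSI-X (one vector per q_vector) -> MSI (single vector, one Tx/Rx
 * queue pair) -> legacy INTx (shared line).  Each step down tears down
 * the previous scheme's resources before retrying request_irq().
 */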
1331
1332static void igb_free_irq(struct igb_adapter *adapter)
1333{
Auke Kok9d5c8242008-01-24 02:22:38 -08001334 if (adapter->msix_entries) {
1335 int vector = 0, i;
1336
Alexander Duyck047e0032009-10-27 15:49:27 +00001337 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001338
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001339 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001340 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001341 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001342 } else {
1343 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001344 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001345}
1346
1347/**
1348 * igb_irq_disable - Mask off interrupt generation on the NIC
1349 * @adapter: board private structure
1350 **/
1351static void igb_irq_disable(struct igb_adapter *adapter)
1352{
1353 struct e1000_hw *hw = &adapter->hw;
1354
Alexander Duyck25568a52009-10-27 23:49:59 +00001355 /*
 1356	 * we need to be careful when disabling interrupts. The VFs are also
 1357	 * mapped into these registers, so clearing bits wholesale can cause
 1358	 * issues for the VF drivers; only clear the bits this driver set
1359 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001360 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001361 u32 regval = rd32(E1000_EIAM);
1362 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1363 wr32(E1000_EIMC, adapter->eims_enable_mask);
1364 regval = rd32(E1000_EIAC);
1365 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001366 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001367
1368 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001369 wr32(E1000_IMC, ~0);
1370 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001371 if (adapter->msix_entries) {
1372 int i;
1373 for (i = 0; i < adapter->num_q_vectors; i++)
1374 synchronize_irq(adapter->msix_entries[i].vector);
1375 } else {
1376 synchronize_irq(adapter->pdev->irq);
1377 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001378}
1379
1380/**
1381 * igb_irq_enable - Enable default interrupt generation settings
1382 * @adapter: board private structure
1383 **/
1384static void igb_irq_enable(struct igb_adapter *adapter)
1385{
1386 struct e1000_hw *hw = &adapter->hw;
1387
1388 if (adapter->msix_entries) {
Alexander Duyck06218a82011-08-26 07:46:55 +00001389 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001390 u32 regval = rd32(E1000_EIAC);
1391 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1392 regval = rd32(E1000_EIAM);
1393 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001394 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001395 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001396 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001397 ims |= E1000_IMS_VMMB;
1398 }
1399 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001400 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001401 wr32(E1000_IMS, IMS_ENABLE_MASK |
1402 E1000_IMS_DRSTA);
1403 wr32(E1000_IAM, IMS_ENABLE_MASK |
1404 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001405 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001406}
1407
1408static void igb_update_mng_vlan(struct igb_adapter *adapter)
1409{
Alexander Duyck51466232009-10-27 23:47:35 +00001410 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001411 u16 vid = adapter->hw.mng_cookie.vlan_id;
1412 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001413
Alexander Duyck51466232009-10-27 23:47:35 +00001414 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1415 /* add VID to filter table */
1416 igb_vfta_set(hw, vid, true);
1417 adapter->mng_vlan_id = vid;
1418 } else {
1419 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1420 }
1421
1422 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1423 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001424 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001425 /* remove VID from filter table */
1426 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001427 }
1428}
1429
1430/**
1431 * igb_release_hw_control - release control of the h/w to f/w
1432 * @adapter: address of board private structure
1433 *
 1434 * igb_release_hw_control resets the CTRL_EXT:DRV_LOAD bit.
1435 * For ASF and Pass Through versions of f/w this means that the
1436 * driver is no longer loaded.
1437 *
1438 **/
1439static void igb_release_hw_control(struct igb_adapter *adapter)
1440{
1441 struct e1000_hw *hw = &adapter->hw;
1442 u32 ctrl_ext;
1443
1444 /* Let firmware take over control of h/w */
1445 ctrl_ext = rd32(E1000_CTRL_EXT);
1446 wr32(E1000_CTRL_EXT,
1447 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1448}
1449
Auke Kok9d5c8242008-01-24 02:22:38 -08001450/**
1451 * igb_get_hw_control - get control of the h/w from f/w
1452 * @adapter: address of board private structure
1453 *
 1454 * igb_get_hw_control sets the CTRL_EXT:DRV_LOAD bit.
1455 * For ASF and Pass Through versions of f/w this means that
1456 * the driver is loaded.
1457 *
1458 **/
1459static void igb_get_hw_control(struct igb_adapter *adapter)
1460{
1461 struct e1000_hw *hw = &adapter->hw;
1462 u32 ctrl_ext;
1463
1464 /* Let firmware know the driver has taken over */
1465 ctrl_ext = rd32(E1000_CTRL_EXT);
1466 wr32(E1000_CTRL_EXT,
1467 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1468}
1469
Auke Kok9d5c8242008-01-24 02:22:38 -08001470/**
1471 * igb_configure - configure the hardware for RX and TX
1472 * @adapter: private board structure
1473 **/
1474static void igb_configure(struct igb_adapter *adapter)
1475{
1476 struct net_device *netdev = adapter->netdev;
1477 int i;
1478
1479 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001480 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001481
1482 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001483
Alexander Duyck85b430b2009-10-27 15:50:29 +00001484 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001485 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001486 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001487
1488 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001489 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001490
1491 igb_rx_fifo_flush_82575(&adapter->hw);
1492
Alexander Duyckc493ea42009-03-20 00:16:50 +00001493 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001494 * at least 1 descriptor unused to make sure
1495 * next_to_use != next_to_clean */
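	/* For reference, a sketch of the igb_desc_unused() helper from
	 * igb.h; it effectively computes:
	 *	if (ring->next_to_clean > ring->next_to_use)
	 *		return ring->next_to_clean - ring->next_to_use - 1;
	 *	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
	 */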
1496 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001497 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001498 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001499 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001500}
1501
Nick Nunley88a268c2010-02-17 01:01:59 +00001502/**
1503 * igb_power_up_link - Power up the phy/serdes link
1504 * @adapter: address of board private structure
1505 **/
1506void igb_power_up_link(struct igb_adapter *adapter)
1507{
1508 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1509 igb_power_up_phy_copper(&adapter->hw);
1510 else
1511 igb_power_up_serdes_link_82575(&adapter->hw);
Koki Sanagia95a0742012-01-04 20:23:38 +00001512 igb_reset_phy(&adapter->hw);
Nick Nunley88a268c2010-02-17 01:01:59 +00001513}
1514
1515/**
1516 * igb_power_down_link - Power down the phy/serdes link
1517 * @adapter: address of board private structure
1518 */
1519static void igb_power_down_link(struct igb_adapter *adapter)
1520{
1521 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1522 igb_power_down_phy_copper_82575(&adapter->hw);
1523 else
1524 igb_shutdown_serdes_link_82575(&adapter->hw);
1525}
Auke Kok9d5c8242008-01-24 02:22:38 -08001526
1527/**
1528 * igb_up - Open the interface and prepare it to handle traffic
1529 * @adapter: board private structure
1530 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001531int igb_up(struct igb_adapter *adapter)
1532{
1533 struct e1000_hw *hw = &adapter->hw;
1534 int i;
1535
1536 /* hardware has been reset, we need to reload some things */
1537 igb_configure(adapter);
1538
1539 clear_bit(__IGB_DOWN, &adapter->state);
1540
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001541 for (i = 0; i < adapter->num_q_vectors; i++)
1542 napi_enable(&(adapter->q_vector[i]->napi));
1543
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001544 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001545 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001546 else
1547 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001548
1549 /* Clear any pending interrupts. */
1550 rd32(E1000_ICR);
1551 igb_irq_enable(adapter);
1552
Alexander Duyckd4960302009-10-27 15:53:45 +00001553 /* notify VFs that reset has been completed */
1554 if (adapter->vfs_allocated_count) {
1555 u32 reg_data = rd32(E1000_CTRL_EXT);
1556 reg_data |= E1000_CTRL_EXT_PFRSTD;
1557 wr32(E1000_CTRL_EXT, reg_data);
1558 }
1559
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001560 netif_tx_start_all_queues(adapter->netdev);
1561
Alexander Duyck25568a52009-10-27 23:49:59 +00001562 /* start the watchdog. */
1563 hw->mac.get_link_status = 1;
1564 schedule_work(&adapter->watchdog_task);
1565
Auke Kok9d5c8242008-01-24 02:22:38 -08001566 return 0;
1567}
1568
1569void igb_down(struct igb_adapter *adapter)
1570{
Auke Kok9d5c8242008-01-24 02:22:38 -08001571 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001572 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001573 u32 tctl, rctl;
1574 int i;
1575
1576 /* signal that we're down so the interrupt handler does not
1577 * reschedule our watchdog timer */
1578 set_bit(__IGB_DOWN, &adapter->state);
1579
1580 /* disable receives in the hardware */
1581 rctl = rd32(E1000_RCTL);
1582 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1583 /* flush and sleep below */
1584
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001585 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001586
1587 /* disable transmits in the hardware */
1588 tctl = rd32(E1000_TCTL);
1589 tctl &= ~E1000_TCTL_EN;
1590 wr32(E1000_TCTL, tctl);
1591 /* flush both disables and wait for them to finish */
1592 wrfl();
1593 msleep(10);
1594
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001595 for (i = 0; i < adapter->num_q_vectors; i++)
1596 napi_disable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08001597
Auke Kok9d5c8242008-01-24 02:22:38 -08001598 igb_irq_disable(adapter);
1599
1600 del_timer_sync(&adapter->watchdog_timer);
1601 del_timer_sync(&adapter->phy_info_timer);
1602
Auke Kok9d5c8242008-01-24 02:22:38 -08001603 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001604
 1605	/* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001606 spin_lock(&adapter->stats64_lock);
1607 igb_update_stats(adapter, &adapter->stats64);
1608 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001609
Auke Kok9d5c8242008-01-24 02:22:38 -08001610 adapter->link_speed = 0;
1611 adapter->link_duplex = 0;
1612
Jeff Kirsher30236822008-06-24 17:01:15 -07001613 if (!pci_channel_offline(adapter->pdev))
1614 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001615 igb_clean_all_tx_rings(adapter);
1616 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001617#ifdef CONFIG_IGB_DCA
1618
1619 /* since we reset the hardware DCA settings were cleared */
1620 igb_setup_dca(adapter);
1621#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001622}
1623
1624void igb_reinit_locked(struct igb_adapter *adapter)
1625{
1626 WARN_ON(in_interrupt());
1627 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1628 msleep(1);
1629 igb_down(adapter);
1630 igb_up(adapter);
1631 clear_bit(__IGB_RESETTING, &adapter->state);
1632}
1633
1634void igb_reset(struct igb_adapter *adapter)
1635{
Alexander Duyck090b1792009-10-27 23:51:55 +00001636 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001637 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001638 struct e1000_mac_info *mac = &hw->mac;
1639 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001640 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1641 u16 hwm;
1642
 1643	/* Repartition PBA for MTUs greater than 9K.
 1644	 * CTRL.RST is required for the change to take effect.
1645 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001646 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001647 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001648 case e1000_82580:
1649 pba = rd32(E1000_RXPBS);
1650 pba = igb_rxpbs_adjust_82580(pba);
1651 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001652 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001653 pba = rd32(E1000_RXPBS);
1654 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001655 break;
1656 case e1000_82575:
1657 default:
1658 pba = E1000_PBA_34K;
1659 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001660 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001661
Alexander Duyck2d064c02008-07-08 15:10:12 -07001662 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1663 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001664 /* adjust PBA for jumbo frames */
1665 wr32(E1000_PBA, pba);
1666
1667 /* To maintain wire speed transmits, the Tx FIFO should be
1668 * large enough to accommodate two full transmit packets,
1669 * rounded up to the next 1KB and expressed in KB. Likewise,
1670 * the Rx FIFO should be large enough to accommodate at least
1671 * one full receive packet and is similarly rounded up and
1672 * expressed in KB. */
1673 pba = rd32(E1000_PBA);
1674 /* upper 16 bits has Tx packet buffer allocation size in KB */
1675 tx_space = pba >> 16;
1676 /* lower 16 bits has Rx packet buffer allocation size in KB */
1677 pba &= 0xffff;
 1678		/* the Tx FIFO also stores 16 bytes of information about the Tx
 1679		 * packet, but we don't include the Ethernet FCS because hardware appends it */
1680 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001681 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001682 ETH_FCS_LEN) * 2;
1683 min_tx_space = ALIGN(min_tx_space, 1024);
1684 min_tx_space >>= 10;
1685 /* software strips receive CRC, so leave room for it */
1686 min_rx_space = adapter->max_frame_size;
1687 min_rx_space = ALIGN(min_rx_space, 1024);
1688 min_rx_space >>= 10;
1689
1690 /* If current Tx allocation is less than the min Tx FIFO size,
1691 * and the min Tx FIFO size is less than the current Rx FIFO
1692 * allocation, take space away from current Rx allocation */
1693 if (tx_space < min_tx_space &&
1694 ((min_tx_space - tx_space) < pba)) {
1695 pba = pba - (min_tx_space - tx_space);
1696
1697 /* if short on rx space, rx wins and must trump tx
1698 * adjustment */
1699 if (pba < min_rx_space)
1700 pba = min_rx_space;
1701 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001702 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001703 }
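	/* Worked example of the arithmetic above (illustrative values:
	 * max_frame_size = 9018, 16-byte advanced Tx descriptor):
	 *	min_tx_space = (9018 + 16 - 4) * 2 = 18060
	 *	             -> ALIGN(18060, 1024) >> 10 = 18 (KB)
	 *	min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB)
	 */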
Auke Kok9d5c8242008-01-24 02:22:38 -08001704
1705 /* flow control settings */
1706 /* The high water mark must be low enough to fit one full frame
1707 * (or the size used for early receive) above it in the Rx FIFO.
1708 * Set it to the lower of:
1709 * - 90% of the Rx FIFO size, or
1710 * - the full Rx FIFO size minus one full frame */
1711 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001712 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001713
Alexander Duyckd405ea32009-12-23 13:21:27 +00001714 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1715 fc->low_water = fc->high_water - 16;
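	/* Illustrative numbers (assumed, not from a datasheet): with
	 * pba = 34 (KB) and max_frame_size = 1522,
	 *	hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
	 *	    = min(31334, 31772) = 31334,
	 * and the 0xFFF0 mask rounds high_water down to 31328 bytes.
	 */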
Auke Kok9d5c8242008-01-24 02:22:38 -08001716 fc->pause_time = 0xFFFF;
1717 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001718 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001719
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001720 /* disable receive for all VFs and wait one second */
1721 if (adapter->vfs_allocated_count) {
1722 int i;
1723 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001724 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001725
1726 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001727 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001728
1729 /* disable transmits and receives */
1730 wr32(E1000_VFRE, 0);
1731 wr32(E1000_VFTE, 0);
1732 }
1733
Auke Kok9d5c8242008-01-24 02:22:38 -08001734 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001735 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001736 wr32(E1000_WUC, 0);
1737
Alexander Duyck330a6d62009-10-27 23:51:35 +00001738 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001739 dev_err(&pdev->dev, "Hardware Error\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08001740
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00001741 igb_init_dmac(adapter, pba);
Nick Nunley88a268c2010-02-17 01:01:59 +00001742 if (!netif_running(adapter->netdev))
1743 igb_power_down_link(adapter);
1744
Auke Kok9d5c8242008-01-24 02:22:38 -08001745 igb_update_mng_vlan(adapter);
1746
1747 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1748 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1749
Alexander Duyck330a6d62009-10-27 23:51:35 +00001750 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001751}
1752
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001753static netdev_features_t igb_fix_features(struct net_device *netdev,
1754 netdev_features_t features)
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001755{
1756 /*
 1757	 * Since there is no support for separate Rx/Tx VLAN accel
 1758	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
1759 */
1760 if (features & NETIF_F_HW_VLAN_RX)
1761 features |= NETIF_F_HW_VLAN_TX;
1762 else
1763 features &= ~NETIF_F_HW_VLAN_TX;
1764
1765 return features;
1766}
1767
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001768static int igb_set_features(struct net_device *netdev,
1769 netdev_features_t features)
Michał Mirosławac52caa2011-06-08 08:38:01 +00001770{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001771 netdev_features_t changed = netdev->features ^ features;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001772
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001773 if (changed & NETIF_F_HW_VLAN_RX)
1774 igb_vlan_mode(netdev, features);
1775
Michał Mirosławac52caa2011-06-08 08:38:01 +00001776 return 0;
1777}
1778
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001779static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001780 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001781 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001782 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001783 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001784 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001785 .ndo_set_mac_address = igb_set_mac,
1786 .ndo_change_mtu = igb_change_mtu,
1787 .ndo_do_ioctl = igb_ioctl,
1788 .ndo_tx_timeout = igb_tx_timeout,
1789 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001790 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1791 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001792 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1793 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1794 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1795 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001796#ifdef CONFIG_NET_POLL_CONTROLLER
1797 .ndo_poll_controller = igb_netpoll,
1798#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001799 .ndo_fix_features = igb_fix_features,
1800 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001801};
1802
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001803/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001804 * igb_probe - Device Initialization Routine
1805 * @pdev: PCI device information struct
1806 * @ent: entry in igb_pci_tbl
1807 *
1808 * Returns 0 on success, negative on failure
1809 *
1810 * igb_probe initializes an adapter identified by a pci_dev structure.
1811 * The OS initialization, configuring of the adapter private structure,
1812 * and a hardware reset occur.
1813 **/
1814static int __devinit igb_probe(struct pci_dev *pdev,
1815 const struct pci_device_id *ent)
1816{
1817 struct net_device *netdev;
1818 struct igb_adapter *adapter;
1819 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001820 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001821 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001822 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001823 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1824 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001825 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001826 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001827 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001828
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001829 /* Catch broken hardware that put the wrong VF device ID in
1830 * the PCIe SR-IOV capability.
1831 */
1832 if (pdev->is_virtfn) {
1833 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1834 pci_name(pdev), pdev->vendor, pdev->device);
1835 return -EINVAL;
1836 }
1837
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001838 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001839 if (err)
1840 return err;
1841
1842 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001843 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001844 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001845 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001846 if (!err)
1847 pci_using_dac = 1;
1848 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001849 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001850 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001851 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001852 if (err) {
1853 dev_err(&pdev->dev, "No usable DMA "
1854 "configuration, aborting\n");
1855 goto err_dma;
1856 }
1857 }
1858 }
1859
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001860 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1861 IORESOURCE_MEM),
1862 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001863 if (err)
1864 goto err_pci_reg;
1865
Frans Pop19d5afd2009-10-02 10:04:12 -07001866 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001867
Auke Kok9d5c8242008-01-24 02:22:38 -08001868 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001869 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001870
1871 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001872 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001873 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001874 if (!netdev)
1875 goto err_alloc_etherdev;
1876
1877 SET_NETDEV_DEV(netdev, &pdev->dev);
1878
1879 pci_set_drvdata(pdev, netdev);
1880 adapter = netdev_priv(netdev);
1881 adapter->netdev = netdev;
1882 adapter->pdev = pdev;
1883 hw = &adapter->hw;
1884 hw->back = adapter;
1885 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1886
1887 mmio_start = pci_resource_start(pdev, 0);
1888 mmio_len = pci_resource_len(pdev, 0);
1889
1890 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001891 hw->hw_addr = ioremap(mmio_start, mmio_len);
1892 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001893 goto err_ioremap;
1894
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001895 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001896 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001897 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001898
1899 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1900
1901 netdev->mem_start = mmio_start;
1902 netdev->mem_end = mmio_start + mmio_len;
1903
Auke Kok9d5c8242008-01-24 02:22:38 -08001904 /* PCI config space info */
1905 hw->vendor_id = pdev->vendor;
1906 hw->device_id = pdev->device;
1907 hw->revision_id = pdev->revision;
1908 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1909 hw->subsystem_device_id = pdev->subsystem_device;
1910
Auke Kok9d5c8242008-01-24 02:22:38 -08001911 /* Copy the default MAC, PHY and NVM function pointers */
1912 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1913 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1914 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1915 /* Initialize skew-specific constants */
1916 err = ei->get_invariants(hw);
1917 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001918 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001919
Alexander Duyck450c87c2009-02-06 23:22:11 +00001920 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001921 err = igb_sw_init(adapter);
1922 if (err)
1923 goto err_sw_init;
1924
1925 igb_get_bus_info_pcie(hw);
1926
1927 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001928
1929 /* Copper options */
1930 if (hw->phy.media_type == e1000_media_type_copper) {
1931 hw->phy.mdix = AUTO_ALL_MODES;
1932 hw->phy.disable_polarity_correction = false;
1933 hw->phy.ms_type = e1000_ms_hw_default;
1934 }
1935
1936 if (igb_check_reset_block(hw))
1937 dev_info(&pdev->dev,
1938 "PHY reset is blocked due to SOL/IDER session.\n");
1939
Alexander Duyck077887c2011-08-26 07:46:29 +00001940 /*
 1941	 * features is initialized to 0 at allocation; it might already have
 1942	 * bits set by igb_sw_init, so we should use an OR instead of an
1943 * assignment.
1944 */
1945 netdev->features |= NETIF_F_SG |
1946 NETIF_F_IP_CSUM |
1947 NETIF_F_IPV6_CSUM |
1948 NETIF_F_TSO |
1949 NETIF_F_TSO6 |
1950 NETIF_F_RXHASH |
1951 NETIF_F_RXCSUM |
1952 NETIF_F_HW_VLAN_RX |
1953 NETIF_F_HW_VLAN_TX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001954
Alexander Duyck077887c2011-08-26 07:46:29 +00001955 /* copy netdev features into list of user selectable features */
1956 netdev->hw_features |= netdev->features;
Auke Kok9d5c8242008-01-24 02:22:38 -08001957
Alexander Duyck077887c2011-08-26 07:46:29 +00001958 /* set this bit last since it cannot be part of hw_features */
1959 netdev->features |= NETIF_F_HW_VLAN_FILTER;
1960
1961 netdev->vlan_features |= NETIF_F_TSO |
1962 NETIF_F_TSO6 |
1963 NETIF_F_IP_CSUM |
1964 NETIF_F_IPV6_CSUM |
1965 NETIF_F_SG;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001966
Ben Greear6b8f0922012-03-06 09:41:53 +00001967 netdev->priv_flags |= IFF_SUPP_NOFCS;
1968
Yi Zou7b872a52010-09-22 17:57:58 +00001969 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001970 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001971 netdev->vlan_features |= NETIF_F_HIGHDMA;
1972 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001973
Michał Mirosławac52caa2011-06-08 08:38:01 +00001974 if (hw->mac.type >= e1000_82576) {
1975 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001976 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001977 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001978
Jiri Pirko01789342011-08-16 06:29:00 +00001979 netdev->priv_flags |= IFF_UNICAST_FLT;
1980
Alexander Duyck330a6d62009-10-27 23:51:35 +00001981 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001982
1983 /* before reading the NVM, reset the controller to put the device in a
1984 * known good starting state */
1985 hw->mac.ops.reset_hw(hw);
1986
1987 /* make sure the NVM is good */
Carolyn Wyborny4322e562011-03-11 20:43:18 -08001988 if (hw->nvm.ops.validate(hw) < 0) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001989 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1990 err = -EIO;
1991 goto err_eeprom;
1992 }
1993
1994 /* copy the MAC address out of the NVM */
1995 if (hw->mac.ops.read_mac_addr(hw))
1996 dev_err(&pdev->dev, "NVM Read Error\n");
1997
1998 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1999 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2000
2001 if (!is_valid_ether_addr(netdev->perm_addr)) {
2002 dev_err(&pdev->dev, "Invalid MAC Address\n");
2003 err = -EIO;
2004 goto err_eeprom;
2005 }
2006
Joe Perchesc061b182010-08-23 18:20:03 +00002007 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002008 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002009 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002010 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002011
2012 INIT_WORK(&adapter->reset_task, igb_reset_task);
2013 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2014
Alexander Duyck450c87c2009-02-06 23:22:11 +00002015 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002016 adapter->fc_autoneg = true;
2017 hw->mac.autoneg = true;
2018 hw->phy.autoneg_advertised = 0x2f;
2019
Alexander Duyck0cce1192009-07-23 18:10:24 +00002020 hw->fc.requested_mode = e1000_fc_default;
2021 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002022
Auke Kok9d5c8242008-01-24 02:22:38 -08002023 igb_validate_mdi_setting(hw);
2024
Auke Kok9d5c8242008-01-24 02:22:38 -08002025	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
2026 * enable the ACPI Magic Packet filter
2027 */
2028
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002029 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002030 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002031 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002032 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2033 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2034 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002035 else if (hw->bus.func == 1)
2036 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002037
2038 if (eeprom_data & eeprom_apme_mask)
2039 adapter->eeprom_wol |= E1000_WUFC_MAG;
2040
2041 /* now that we have the eeprom settings, apply the special cases where
2042 * the eeprom may be wrong or the board simply won't support wake on
2043 * lan on a particular port */
2044 switch (pdev->device) {
2045 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2046 adapter->eeprom_wol = 0;
2047 break;
2048 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002049 case E1000_DEV_ID_82576_FIBER:
2050 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002051 /* Wake events only supported on port A for dual fiber
2052 * regardless of eeprom setting */
2053 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2054 adapter->eeprom_wol = 0;
2055 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002056 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002057 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002058 /* if quad port adapter, disable WoL on all but port A */
2059 if (global_quad_port_a != 0)
2060 adapter->eeprom_wol = 0;
2061 else
2062 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2063 /* Reset for multiple quad port adapters */
2064 if (++global_quad_port_a == 4)
2065 global_quad_port_a = 0;
2066 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002067 }
2068
2069 /* initialize the wol settings based on the eeprom settings */
2070 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002071 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002072
2073 /* reset the hardware with the new settings */
2074 igb_reset(adapter);
2075
2076 /* let the f/w know that the h/w is now under the control of the
2077 * driver. */
2078 igb_get_hw_control(adapter);
2079
Auke Kok9d5c8242008-01-24 02:22:38 -08002080 strcpy(netdev->name, "eth%d");
2081 err = register_netdev(netdev);
2082 if (err)
2083 goto err_register;
2084
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002085 /* carrier off reporting is important to ethtool even BEFORE open */
2086 netif_carrier_off(netdev);
2087
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002088#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002089 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002090 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002091 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002092 igb_setup_dca(adapter);
2093 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002094
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002095#endif
Anders Berggren673b8b72011-02-04 07:32:32 +00002096 /* do hw tstamp init after resetting */
2097 igb_init_hw_timer(adapter);
2098
Auke Kok9d5c8242008-01-24 02:22:38 -08002099 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2100 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002101 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002102 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002103 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002104 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002105 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002106 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2107 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2108 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2109 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002110 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002111
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002112 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2113 if (ret_val)
2114 strcpy(part_str, "Unknown");
2115 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002116 dev_info(&pdev->dev,
2117 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2118 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002119 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002120 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002121 switch (hw->mac.type) {
2122 case e1000_i350:
2123 igb_set_eee_i350(hw);
2124 break;
2125 default:
2126 break;
2127 }
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002128
2129 pm_runtime_put_noidle(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002130 return 0;
2131
2132err_register:
2133 igb_release_hw_control(adapter);
2134err_eeprom:
2135 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002136 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002137
2138 if (hw->flash_address)
2139 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002140err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002141 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002142 iounmap(hw->hw_addr);
2143err_ioremap:
2144 free_netdev(netdev);
2145err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002146 pci_release_selected_regions(pdev,
2147 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002148err_pci_reg:
2149err_dma:
2150 pci_disable_device(pdev);
2151 return err;
2152}
2153
2154/**
2155 * igb_remove - Device Removal Routine
2156 * @pdev: PCI device information struct
2157 *
2158 * igb_remove is called by the PCI subsystem to alert the driver
 2159 * that it should release a PCI device. This could be caused by a
2160 * Hot-Plug event, or because the driver is going to be removed from
2161 * memory.
2162 **/
2163static void __devexit igb_remove(struct pci_dev *pdev)
2164{
2165 struct net_device *netdev = pci_get_drvdata(pdev);
2166 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002167 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002168
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002169 pm_runtime_get_noresume(&pdev->dev);
2170
Tejun Heo760141a2010-12-12 16:45:14 +01002171 /*
2172 * The watchdog timer may be rescheduled, so explicitly
2173 * disable watchdog from being rescheduled.
2174 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002175 set_bit(__IGB_DOWN, &adapter->state);
2176 del_timer_sync(&adapter->watchdog_timer);
2177 del_timer_sync(&adapter->phy_info_timer);
2178
Tejun Heo760141a2010-12-12 16:45:14 +01002179 cancel_work_sync(&adapter->reset_task);
2180 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002181
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002182#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002183 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002184 dev_info(&pdev->dev, "DCA disabled\n");
2185 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002186 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002187 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002188 }
2189#endif
2190
Auke Kok9d5c8242008-01-24 02:22:38 -08002191 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2192 * would have already happened in close and is redundant. */
2193 igb_release_hw_control(adapter);
2194
2195 unregister_netdev(netdev);
2196
Alexander Duyck047e0032009-10-27 15:49:27 +00002197 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002198
Alexander Duyck37680112009-02-19 20:40:30 -08002199#ifdef CONFIG_PCI_IOV
2200 /* reclaim resources allocated to VFs */
2201 if (adapter->vf_data) {
2202 /* disable iov and allow time for transactions to clear */
Greg Rose0224d662011-10-14 02:57:14 +00002203 if (!igb_check_vf_assignment(adapter)) {
2204 pci_disable_sriov(pdev);
2205 msleep(500);
2206 } else {
2207 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2208 }
Alexander Duyck37680112009-02-19 20:40:30 -08002209
2210 kfree(adapter->vf_data);
2211 adapter->vf_data = NULL;
2212 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002213 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002214 msleep(100);
2215 dev_info(&pdev->dev, "IOV Disabled\n");
2216 }
2217#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002218
Alexander Duyck28b07592009-02-06 23:20:31 +00002219 iounmap(hw->hw_addr);
2220 if (hw->flash_address)
2221 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002222 pci_release_selected_regions(pdev,
2223 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002224
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002225 kfree(adapter->shadow_vfta);
Auke Kok9d5c8242008-01-24 02:22:38 -08002226 free_netdev(netdev);
2227
Frans Pop19d5afd2009-10-02 10:04:12 -07002228 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002229
Auke Kok9d5c8242008-01-24 02:22:38 -08002230 pci_disable_device(pdev);
2231}
2232
2233/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002234 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2235 * @adapter: board private structure to initialize
2236 *
2237 * This function initializes the vf specific data storage and then attempts to
2238 * allocate the VFs. The reason for ordering it this way is because it is much
2239 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2240 * the memory for the VFs.
2241 **/
2242static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2243{
2244#ifdef CONFIG_PCI_IOV
2245 struct pci_dev *pdev = adapter->pdev;
Greg Rose0224d662011-10-14 02:57:14 +00002246 int old_vfs = igb_find_enabled_vfs(adapter);
2247 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002248
Greg Rose0224d662011-10-14 02:57:14 +00002249 if (old_vfs) {
2250 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2251 "max_vfs setting of %d\n", old_vfs, max_vfs);
2252 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002253 }
2254
Greg Rose0224d662011-10-14 02:57:14 +00002255 if (!adapter->vfs_allocated_count)
2256 return;
2257
2258 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2259 sizeof(struct vf_data_storage), GFP_KERNEL);
2260 /* if allocation failed then we do not support SR-IOV */
2261 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002262 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002263 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2264 "Data Storage\n");
2265 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002266 }
Greg Rose0224d662011-10-14 02:57:14 +00002267
2268 if (!old_vfs) {
2269 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2270 goto err_out;
2271 }
2272 dev_info(&pdev->dev, "%d VFs allocated\n",
2273 adapter->vfs_allocated_count);
2274 for (i = 0; i < adapter->vfs_allocated_count; i++)
2275 igb_vf_configure(adapter, i);
2276
2277 /* DMA Coalescing is not supported in IOV mode. */
2278 adapter->flags &= ~IGB_FLAG_DMAC;
2279 goto out;
2280err_out:
2281 kfree(adapter->vf_data);
2282 adapter->vf_data = NULL;
2283 adapter->vfs_allocated_count = 0;
2284out:
2285 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002286#endif /* CONFIG_PCI_IOV */
2287}
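/*
 * Usage sketch for the max_vfs module parameter consumed above,
 * assuming no VFs are already enabled in PCI config space:
 *
 *	# modprobe igb max_vfs=2
 *
 * Pre-existing VFs reported by igb_find_enabled_vfs() override the
 * module parameter.
 */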
2288
Alexander Duyck115f4592009-11-12 18:37:00 +00002289/**
2290 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2291 * @adapter: board private structure to initialize
2292 *
2293 * igb_init_hw_timer initializes the function pointer and values for the hw
2294 * timer found in hardware.
2295 **/
2296static void igb_init_hw_timer(struct igb_adapter *adapter)
2297{
2298 struct e1000_hw *hw = &adapter->hw;
2299
2300 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002301 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002302 case e1000_82580:
2303 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2304 adapter->cycles.read = igb_read_clock;
2305 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2306 adapter->cycles.mult = 1;
2307 /*
 2308		 * The 82580 timesync updates the system timer in 8ns increments,
 2309		 * and the timer value cannot be shifted. Instead we need to shift
2310 * the registers to generate a 64bit timer value. As a result
2311 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2312 * 24 in order to generate a larger value for synchronization.
2313 */
2314 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
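		/* With IGB_82580_TSYNC_SHIFT (24, per the comment above)
		 * the timecounter math reduces to ns = cycles >> 24,
		 * undoing the 24-bit shift applied when SYSTIMR/L/H are
		 * read elsewhere in the driver.
		 */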
2315 /* disable system timer temporarily by setting bit 31 */
2316 wr32(E1000_TSAUXC, 0x80000000);
2317 wrfl();
2318
2319 /* Set registers so that rollover occurs soon to test this. */
2320 wr32(E1000_SYSTIMR, 0x00000000);
2321 wr32(E1000_SYSTIML, 0x80000000);
2322 wr32(E1000_SYSTIMH, 0x000000FF);
2323 wrfl();
2324
2325 /* enable system timer by clearing bit 31 */
2326 wr32(E1000_TSAUXC, 0x0);
2327 wrfl();
2328
2329 timecounter_init(&adapter->clock,
2330 &adapter->cycles,
2331 ktime_to_ns(ktime_get_real()));
2332 /*
2333 * Synchronize our NIC clock against system wall clock. NIC
2334 * time stamp reading requires ~3us per sample, each sample
2335 * was pretty stable even under load => only require 10
2336 * samples for each offset comparison.
2337 */
2338 memset(&adapter->compare, 0, sizeof(adapter->compare));
2339 adapter->compare.source = &adapter->clock;
2340 adapter->compare.target = ktime_get_real;
2341 adapter->compare.num_samples = 10;
2342 timecompare_update(&adapter->compare, 0);
2343 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00002344 case e1000_82576:
2345 /*
2346 * Initialize hardware timer: we keep it running just in case
2347 * that some program needs it later on.
2348 */
2349 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2350 adapter->cycles.read = igb_read_clock;
2351 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2352 adapter->cycles.mult = 1;
 2353		/*
2354 * Scale the NIC clock cycle by a large factor so that
2355 * relatively small clock corrections can be added or
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002356 * subtracted at each clock tick. The drawbacks of a large
Alexander Duyck115f4592009-11-12 18:37:00 +00002357 * factor are a) that the clock register overflows more quickly
2358 * (not such a big deal) and b) that the increment per tick has
2359 * to fit into 24 bits. As a result we need to use a shift of
2360 * 19 so we can fit a value of 16 into the TIMINCA register.
2361 */
2362 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2363 wr32(E1000_TIMINCA,
2364 (1 << E1000_TIMINCA_16NS_SHIFT) |
2365 (16 << IGB_82576_TSYNC_SHIFT));
2366
2367 /* Set registers so that rollover occurs soon to test this. */
2368 wr32(E1000_SYSTIML, 0x00000000);
2369 wr32(E1000_SYSTIMH, 0xFF800000);
2370 wrfl();
2371
2372 timecounter_init(&adapter->clock,
2373 &adapter->cycles,
2374 ktime_to_ns(ktime_get_real()));
2375 /*
2376 * Synchronize our NIC clock against system wall clock. NIC
2377 * time stamp reading requires ~3us per sample, each sample
2378 * was pretty stable even under load => only require 10
2379 * samples for each offset comparison.
2380 */
2381 memset(&adapter->compare, 0, sizeof(adapter->compare));
2382 adapter->compare.source = &adapter->clock;
2383 adapter->compare.target = ktime_get_real;
2384 adapter->compare.num_samples = 10;
2385 timecompare_update(&adapter->compare, 0);
2386 break;
2387 case e1000_82575:
2388 /* 82575 does not support timesync */
2389 default:
2390 break;
2391 }
2392
2393}
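/*
 * Consumption sketch: the timestamp paths elsewhere in this file
 * convert a raw 64-bit SYSTIM sample back to nanoseconds with
 * timecounter_cyc2time(&adapter->clock, systim), which applies the
 * mult/shift configured above.
 */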
2394
Alexander Duycka6b623e2009-10-27 23:47:53 +00002395/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002396 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2397 * @adapter: board private structure to initialize
2398 *
2399 * igb_sw_init initializes the Adapter private data structure.
2400 * Fields are initialized based on PCI device information and
2401 * OS network device settings (MTU size).
2402 **/
2403static int __devinit igb_sw_init(struct igb_adapter *adapter)
2404{
2405 struct e1000_hw *hw = &adapter->hw;
2406 struct net_device *netdev = adapter->netdev;
2407 struct pci_dev *pdev = adapter->pdev;
2408
2409 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2410
Alexander Duyck13fde972011-10-05 13:35:24 +00002411 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002412 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2413 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002414
2415 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002416 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2417 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2418
Alexander Duyck13fde972011-10-05 13:35:24 +00002419 /* set default work limits */
2420 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2421
Alexander Duyck153285f2011-08-26 07:43:32 +00002422 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2423 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002424 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2425
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002426 adapter->node = -1;
2427
Eric Dumazet12dcd862010-10-15 17:27:10 +00002428 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002429#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002430 switch (hw->mac.type) {
2431 case e1000_82576:
2432 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002433 if (max_vfs > 7) {
2434 dev_warn(&pdev->dev,
2435 "Maximum of 7 VFs per PF, using max\n");
2436 adapter->vfs_allocated_count = 7;
2437 } else
2438 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002439 break;
2440 default:
2441 break;
2442 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002443#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00002444 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
Williams, Mitch A665c8c82011-06-07 14:22:57 -07002445 /* i350 cannot do RSS and SR-IOV at the same time */
2446 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2447 adapter->rss_queues = 1;
Alexander Duycka99955f2009-11-12 18:37:19 +00002448
2449 /*
 2450	 * if rss_queues > 4, or if more than 6 VFs are allocated while
 2451	 * rss_queues > 1, combine the queues into queue pairs in order
 2452	 * to conserve the limited supply of interrupt vectors
2453 */
2454 if ((adapter->rss_queues > 4) ||
2455 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2456 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
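	/* e.g. 8 Rx + 8 Tx queues would need 16 q_vectors unpaired but
	 * only 8 when paired, per igb_map_ring_to_vector() above.
	 */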
2457
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002458 /* Setup and initialize a copy of the hw vlan table array */
2459 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2460 E1000_VLAN_FILTER_TBL_SIZE,
2461 GFP_ATOMIC);
2462
Alexander Duycka6b623e2009-10-27 23:47:53 +00002463 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002464 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002465 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2466 return -ENOMEM;
2467 }
2468
Alexander Duycka6b623e2009-10-27 23:47:53 +00002469 igb_probe_vfs(adapter);
2470
Auke Kok9d5c8242008-01-24 02:22:38 -08002471 /* Explicitly disable IRQ since the NIC can be in any state. */
2472 igb_irq_disable(adapter);
2473
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002474 if (hw->mac.type == e1000_i350)
2475 adapter->flags &= ~IGB_FLAG_DMAC;
2476
Auke Kok9d5c8242008-01-24 02:22:38 -08002477 set_bit(__IGB_DOWN, &adapter->state);
2478 return 0;
2479}
2480
2481/**
2482 * igb_open - Called when a network interface is made active
2483 * @netdev: network interface device structure
2484 *
2485 * Returns 0 on success, negative value on failure
2486 *
2487 * The open entry point is called when a network interface is made
2488 * active by the system (IFF_UP). At this point all resources needed
2489 * for transmit and receive operations are allocated, the interrupt
2490 * handler is registered with the OS, the watchdog timer is started,
2491 * and the stack is notified that the interface is ready.
2492 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002493static int __igb_open(struct net_device *netdev, bool resuming)
Auke Kok9d5c8242008-01-24 02:22:38 -08002494{
2495 struct igb_adapter *adapter = netdev_priv(netdev);
2496 struct e1000_hw *hw = &adapter->hw;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002497 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002498 int err;
2499 int i;
2500
2501 /* disallow open during test */
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002502 if (test_bit(__IGB_TESTING, &adapter->state)) {
2503 WARN_ON(resuming);
Auke Kok9d5c8242008-01-24 02:22:38 -08002504 return -EBUSY;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002505 }
2506
2507 if (!resuming)
2508 pm_runtime_get_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002509
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002510 netif_carrier_off(netdev);
2511
Auke Kok9d5c8242008-01-24 02:22:38 -08002512 /* allocate transmit descriptors */
2513 err = igb_setup_all_tx_resources(adapter);
2514 if (err)
2515 goto err_setup_tx;
2516
2517 /* allocate receive descriptors */
2518 err = igb_setup_all_rx_resources(adapter);
2519 if (err)
2520 goto err_setup_rx;
2521
Nick Nunley88a268c2010-02-17 01:01:59 +00002522 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002523
Auke Kok9d5c8242008-01-24 02:22:38 -08002524	/* before we allocate an interrupt, we must be ready to handle it.
 2525	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 2526	 * as soon as we call request_irq, so we have to set up our
 2527	 * clean_rx handler before we do so. */
2528 igb_configure(adapter);
2529
2530 err = igb_request_irq(adapter);
2531 if (err)
2532 goto err_req_irq;
2533
2534 /* From here on the code is the same as igb_up() */
2535 clear_bit(__IGB_DOWN, &adapter->state);
2536
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002537 for (i = 0; i < adapter->num_q_vectors; i++)
2538 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002539
2540 /* Clear any pending interrupts. */
2541 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002542
2543 igb_irq_enable(adapter);
2544
Alexander Duyckd4960302009-10-27 15:53:45 +00002545 /* notify VFs that reset has been completed */
2546 if (adapter->vfs_allocated_count) {
2547 u32 reg_data = rd32(E1000_CTRL_EXT);
2548 reg_data |= E1000_CTRL_EXT_PFRSTD;
2549 wr32(E1000_CTRL_EXT, reg_data);
2550 }
2551
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002552 netif_tx_start_all_queues(netdev);
2553
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002554 if (!resuming)
2555 pm_runtime_put(&pdev->dev);
2556
Alexander Duyck25568a52009-10-27 23:49:59 +00002557 /* start the watchdog. */
2558 hw->mac.get_link_status = 1;
2559 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002560
2561 return 0;
2562
2563err_req_irq:
2564 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002565 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002566 igb_free_all_rx_resources(adapter);
2567err_setup_rx:
2568 igb_free_all_tx_resources(adapter);
2569err_setup_tx:
2570 igb_reset(adapter);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002571 if (!resuming)
2572 pm_runtime_put(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002573
2574 return err;
2575}
2576
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002577static int igb_open(struct net_device *netdev)
2578{
2579 return __igb_open(netdev, false);
2580}
2581
Auke Kok9d5c8242008-01-24 02:22:38 -08002582/**
2583 * igb_close - Disables a network interface
2584 * @netdev: network interface device structure
2585 *
2586 * Returns 0, this is not allowed to fail
2587 *
2588 * The close entry point is called when an interface is de-activated
2589 * by the OS. The hardware is still under the driver's control, but
2590 * needs to be disabled. A global MAC reset is issued to stop the
2591 * hardware, and all transmit and receive resources are freed.
2592 **/
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002593static int __igb_close(struct net_device *netdev, bool suspending)
Auke Kok9d5c8242008-01-24 02:22:38 -08002594{
2595 struct igb_adapter *adapter = netdev_priv(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002596 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002597
2598 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
Auke Kok9d5c8242008-01-24 02:22:38 -08002599
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002600 if (!suspending)
2601 pm_runtime_get_sync(&pdev->dev);
2602
2603 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002604 igb_free_irq(adapter);
2605
2606 igb_free_all_tx_resources(adapter);
2607 igb_free_all_rx_resources(adapter);
2608
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002609 if (!suspending)
2610 pm_runtime_put_sync(&pdev->dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002611 return 0;
2612}
2613
Yan, Zheng749ab2c2012-01-04 20:23:37 +00002614static int igb_close(struct net_device *netdev)
2615{
2616 return __igb_close(netdev, false);
2617}
2618
Auke Kok9d5c8242008-01-24 02:22:38 -08002619/**
2620 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002621 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2622 *
2623 * Return 0 on success, negative on failure
2624 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002625int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002626{
Alexander Duyck59d71982010-04-27 13:09:25 +00002627 struct device *dev = tx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002628 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002629 int size;
2630
Alexander Duyck06034642011-08-26 07:44:22 +00002631 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002632 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2633 if (!tx_ring->tx_buffer_info)
2634 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002635 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002636 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002637
2638 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002639 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002640 tx_ring->size = ALIGN(tx_ring->size, 4096);
2641
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002642 set_dev_node(dev, tx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002643 tx_ring->desc = dma_alloc_coherent(dev,
2644 tx_ring->size,
2645 &tx_ring->dma,
2646 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002647 set_dev_node(dev, orig_node);
2648 if (!tx_ring->desc)
2649 tx_ring->desc = dma_alloc_coherent(dev,
2650 tx_ring->size,
2651 &tx_ring->dma,
2652 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002653
2654 if (!tx_ring->desc)
2655 goto err;
2656
Auke Kok9d5c8242008-01-24 02:22:38 -08002657 tx_ring->next_to_use = 0;
2658 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002659
Auke Kok9d5c8242008-01-24 02:22:38 -08002660 return 0;
2661
2662err:
Alexander Duyck06034642011-08-26 07:44:22 +00002663 vfree(tx_ring->tx_buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002664 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002665 "Unable to allocate memory for the transmit descriptor ring\n");
2666 return -ENOMEM;
2667}
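
/*
 * Sizing sketch (illustrative, assuming the default ring of 256
 * descriptors): each advanced Tx descriptor is 16 bytes, so
 *
 *	256 * sizeof(union e1000_adv_tx_desc) = 256 * 16 = 4096 bytes,
 *
 * which is already 4K aligned; the ALIGN(, 4096) above only pads rings
 * whose byte size is not a multiple of 4K.
 */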
2668
2669/**
2670 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2671 * (Descriptors) for all queues
2672 * @adapter: board private structure
2673 *
2674 * Return 0 on success, negative on failure
2675 **/
2676static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2677{
Alexander Duyck439705e2009-10-27 23:49:20 +00002678 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002679 int i, err = 0;
2680
2681 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002682 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002683 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002684 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002685 "Allocation for Tx Queue %u failed\n", i);
2686 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002687 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002688 break;
2689 }
2690 }
2691
2692 return err;
2693}
2694
2695/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002696 * igb_setup_tctl - configure the transmit control registers
2697 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002698 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002699void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002700{
Auke Kok9d5c8242008-01-24 02:22:38 -08002701 struct e1000_hw *hw = &adapter->hw;
2702 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002703
Alexander Duyck85b430b2009-10-27 15:50:29 +00002704 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2705 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002706
2707 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002708 tctl = rd32(E1000_TCTL);
2709 tctl &= ~E1000_TCTL_CT;
2710 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2711 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2712
2713 igb_config_collision_dist(hw);
2714
Auke Kok9d5c8242008-01-24 02:22:38 -08002715 /* Enable transmits */
2716 tctl |= E1000_TCTL_EN;
2717
2718 wr32(E1000_TCTL, tctl);
2719}
2720
2721/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002722 * igb_configure_tx_ring - Configure transmit ring after Reset
2723 * @adapter: board private structure
2724 * @ring: tx ring to configure
2725 *
2726 * Configure a transmit ring after a reset.
2727 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002728void igb_configure_tx_ring(struct igb_adapter *adapter,
2729 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002730{
2731 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002732 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002733 u64 tdba = ring->dma;
2734 int reg_idx = ring->reg_idx;
2735
2736 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002737 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002738 wrfl();
2739 mdelay(10);
2740
2741 wr32(E1000_TDLEN(reg_idx),
2742 ring->count * sizeof(union e1000_adv_tx_desc));
2743 wr32(E1000_TDBAL(reg_idx),
2744 tdba & 0x00000000ffffffffULL);
2745 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2746
Alexander Duyckfce99e32009-10-27 15:51:27 +00002747 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002748 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002749 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002750
2751 txdctl |= IGB_TX_PTHRESH;
2752 txdctl |= IGB_TX_HTHRESH << 8;
2753 txdctl |= IGB_TX_WTHRESH << 16;
2754
2755 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2756 wr32(E1000_TXDCTL(reg_idx), txdctl);
Alexander Duyck5c490352012-02-07 02:29:01 +00002757
2758 netdev_tx_reset_queue(txring_txq(ring));
Alexander Duyck85b430b2009-10-27 15:50:29 +00002759}
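
/*
 * Register layout note (for reference): TXDCTL packs the prefetch, host
 * and write-back thresholds into bit fields 5:0, 13:8 and 21:16, which
 * is why igb_configure_tx_ring() shifts IGB_TX_HTHRESH by 8 and
 * IGB_TX_WTHRESH by 16 before OR-ing them in.
 */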
2760
2761/**
2762 * igb_configure_tx - Configure transmit Unit after Reset
2763 * @adapter: board private structure
2764 *
2765 * Configure the Tx unit of the MAC after a reset.
2766 **/
2767static void igb_configure_tx(struct igb_adapter *adapter)
2768{
2769 int i;
2770
2771 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002772 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002773}
2774
2775/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002776 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002777 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2778 *
2779 * Returns 0 on success, negative on failure
2780 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002781int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002782{
Alexander Duyck59d71982010-04-27 13:09:25 +00002783 struct device *dev = rx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002784 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002785 int size, desc_len;
2786
Alexander Duyck06034642011-08-26 07:44:22 +00002787 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002788 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2789 if (!rx_ring->rx_buffer_info)
2790 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002791 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002792 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002793
2794 desc_len = sizeof(union e1000_adv_rx_desc);
2795
2796 /* Round up to nearest 4K */
2797 rx_ring->size = rx_ring->count * desc_len;
2798 rx_ring->size = ALIGN(rx_ring->size, 4096);
2799
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002800 set_dev_node(dev, rx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002801 rx_ring->desc = dma_alloc_coherent(dev,
2802 rx_ring->size,
2803 &rx_ring->dma,
2804 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002805 set_dev_node(dev, orig_node);
2806 if (!rx_ring->desc)
2807 rx_ring->desc = dma_alloc_coherent(dev,
2808 rx_ring->size,
2809 &rx_ring->dma,
2810 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002811
2812 if (!rx_ring->desc)
2813 goto err;
2814
2815 rx_ring->next_to_clean = 0;
2816 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002817
Auke Kok9d5c8242008-01-24 02:22:38 -08002818 return 0;
2819
2820err:
Alexander Duyck06034642011-08-26 07:44:22 +00002821 vfree(rx_ring->rx_buffer_info);
2822 rx_ring->rx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002823	dev_err(dev,
 2824	        "Unable to allocate memory for the receive descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002825 return -ENOMEM;
2826}
2827
2828/**
2829 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2830 * (Descriptors) for all queues
2831 * @adapter: board private structure
2832 *
2833 * Return 0 on success, negative on failure
2834 **/
2835static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2836{
Alexander Duyck439705e2009-10-27 23:49:20 +00002837 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002838 int i, err = 0;
2839
2840 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002841 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002842 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002843 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002844 "Allocation for Rx Queue %u failed\n", i);
2845 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002846 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002847 break;
2848 }
2849 }
2850
2851 return err;
2852}
2853
2854/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002855 * igb_setup_mrqc - configure the multiple receive queue control registers
2856 * @adapter: Board private structure
2857 **/
2858static void igb_setup_mrqc(struct igb_adapter *adapter)
2859{
2860 struct e1000_hw *hw = &adapter->hw;
2861 u32 mrqc, rxcsum;
2862 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2863 union e1000_reta {
2864 u32 dword;
2865 u8 bytes[4];
2866 } reta;
2867 static const u8 rsshash[40] = {
2868 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2869 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2870 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2871 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2872
2873 /* Fill out hash function seeds */
2874 for (j = 0; j < 10; j++) {
2875 u32 rsskey = rsshash[(j * 4)];
2876 rsskey |= rsshash[(j * 4) + 1] << 8;
2877 rsskey |= rsshash[(j * 4) + 2] << 16;
2878 rsskey |= rsshash[(j * 4) + 3] << 24;
2879 array_wr32(E1000_RSSRK(0), j, rsskey);
2880 }
2881
Alexander Duycka99955f2009-11-12 18:37:19 +00002882 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002883
2884 if (adapter->vfs_allocated_count) {
 2885	/* 82575 and 82576 support 2 RSS queues for VMDq */
2886 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002887 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002888 case e1000_82580:
2889 num_rx_queues = 1;
2890 shift = 0;
2891 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002892 case e1000_82576:
2893 shift = 3;
2894 num_rx_queues = 2;
2895 break;
2896 case e1000_82575:
2897 shift = 2;
 2898			shift2 = 6;
			/* fall through */
2899 default:
2900 break;
2901 }
2902 } else {
2903 if (hw->mac.type == e1000_82575)
2904 shift = 6;
2905 }
2906
2907 for (j = 0; j < (32 * 4); j++) {
2908 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2909 if (shift2)
2910 reta.bytes[j & 3] |= num_rx_queues << shift2;
2911 if ((j & 3) == 3)
2912 wr32(E1000_RETA(j >> 2), reta.dword);
2913 }
2914
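	/*
	 * Worked example (illustrative): on an 82576 with VFs attached,
	 * shift = 3 and num_rx_queues = 2, so successive RETA bytes
	 * alternate between (0 << 3) = 0x00 and (1 << 3) = 0x08 and the
	 * 128 table entries spread flows across the PF's two RSS queues.
	 */
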
2915 /*
2916 * Disable raw packet checksumming so that RSS hash is placed in
2917 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2918 * offloads as they are enabled by default
2919 */
2920 rxcsum = rd32(E1000_RXCSUM);
2921 rxcsum |= E1000_RXCSUM_PCSD;
2922
2923 if (adapter->hw.mac.type >= e1000_82576)
2924 /* Enable Receive Checksum Offload for SCTP */
2925 rxcsum |= E1000_RXCSUM_CRCOFL;
2926
2927 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2928 wr32(E1000_RXCSUM, rxcsum);
2929
2930 /* If VMDq is enabled then we set the appropriate mode for that, else
2931 * we default to RSS so that an RSS hash is calculated per packet even
2932 * if we are only using one queue */
2933 if (adapter->vfs_allocated_count) {
2934 if (hw->mac.type > e1000_82575) {
2935 /* Set the default pool for the PF's first queue */
2936 u32 vtctl = rd32(E1000_VT_CTL);
2937 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2938 E1000_VT_CTL_DISABLE_DEF_POOL);
2939 vtctl |= adapter->vfs_allocated_count <<
2940 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2941 wr32(E1000_VT_CTL, vtctl);
2942 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002943 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002944 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2945 else
2946 mrqc = E1000_MRQC_ENABLE_VMDQ;
2947 } else {
2948 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2949 }
2950 igb_vmm_control(adapter);
2951
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002952 /*
2953 * Generate RSS hash based on TCP port numbers and/or
2954 * IPv4/v6 src and dst addresses since UDP cannot be
2955 * hashed reliably due to IP fragmentation
2956 */
2957 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2958 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2959 E1000_MRQC_RSS_FIELD_IPV6 |
2960 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2961 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002962
2963 wr32(E1000_MRQC, mrqc);
2964}
2965
2966/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002967 * igb_setup_rctl - configure the receive control registers
2968 * @adapter: Board private structure
2969 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002970void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002971{
2972 struct e1000_hw *hw = &adapter->hw;
2973 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002974
2975 rctl = rd32(E1000_RCTL);
2976
2977 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002978 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002979
Alexander Duyck69d728b2008-11-25 01:04:03 -08002980 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002981 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002982
Auke Kok87cb7e82008-07-08 15:08:29 -07002983 /*
2984 * enable stripping of CRC. It's unlikely this will break BMC
2985 * redirection as it did with e1000. Newer features require
2986 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002987 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002988 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002989
Alexander Duyck559e9c42009-10-27 23:52:50 +00002990 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002991 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002992
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002993	/* enable LPE to accept long packets; RLPML still caps them at max_frame_size */
2994 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002995
Alexander Duyck952f72a2009-10-27 15:51:07 +00002996 /* disable queue 0 to prevent tail write w/o re-config */
2997 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002998
Alexander Duycke1739522009-02-19 20:39:44 -08002999 /* Attention!!! For SR-IOV PF driver operations you must enable
3000 * queue drop for all VF and PF queues to prevent head of line blocking
3001 * if an un-trusted VF does not provide descriptors to hardware.
3002 */
3003 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08003004 /* set all queue drop enable bits */
3005 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08003006 }
3007
Auke Kok9d5c8242008-01-24 02:22:38 -08003008 wr32(E1000_RCTL, rctl);
3009}
3010
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003011static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3012 int vfn)
3013{
3014 struct e1000_hw *hw = &adapter->hw;
3015 u32 vmolr;
3016
 3017	/* if it isn't the PF, check to see if VFs are enabled and
 3018	 * increase the size to support VLAN tags */
3019 if (vfn < adapter->vfs_allocated_count &&
3020 adapter->vf_data[vfn].vlans_enabled)
3021 size += VLAN_TAG_SIZE;
3022
3023 vmolr = rd32(E1000_VMOLR(vfn));
3024 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3025 vmolr |= size | E1000_VMOLR_LPE;
3026 wr32(E1000_VMOLR(vfn), vmolr);
3027
3028 return 0;
3029}
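
/*
 * Example (illustrative): igb_set_vf_rlpml(adapter, 1518, vf) on a VF
 * that has VLANs enabled programs 1518 + VLAN_TAG_SIZE = 1522 into
 * VMOLR.RLPML, so a tagged standard-MTU frame still fits.
 */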
3030
Auke Kok9d5c8242008-01-24 02:22:38 -08003031/**
Alexander Duycke1739522009-02-19 20:39:44 -08003032 * igb_rlpml_set - set maximum receive packet size
3033 * @adapter: board private structure
3034 *
3035 * Configure maximum receivable packet size.
3036 **/
3037static void igb_rlpml_set(struct igb_adapter *adapter)
3038{
Alexander Duyck153285f2011-08-26 07:43:32 +00003039 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003040 struct e1000_hw *hw = &adapter->hw;
3041 u16 pf_id = adapter->vfs_allocated_count;
3042
Alexander Duycke1739522009-02-19 20:39:44 -08003043 if (pf_id) {
3044 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00003045 /*
3046 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3047 * to our max jumbo frame size, in case we need to enable
3048 * jumbo frames on one of the rings later.
3049 * This will not pass over-length frames into the default
3050 * queue because it's gated by the VMOLR.RLPML.
3051 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003052 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003053 }
3054
3055 wr32(E1000_RLPML, max_frame_size);
3056}
3057
Williams, Mitch A8151d292010-02-10 01:44:24 +00003058static inline void igb_set_vmolr(struct igb_adapter *adapter,
3059 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003060{
3061 struct e1000_hw *hw = &adapter->hw;
3062 u32 vmolr;
3063
3064 /*
 3065	 * This register exists only on 82576 and newer, so if the MAC is
 3066	 * older we should exit and do nothing
3067 */
3068 if (hw->mac.type < e1000_82576)
3069 return;
3070
3071 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003072 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3073 if (aupe)
3074 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3075 else
3076 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003077
3078 /* clear all bits that might not be set */
3079 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3080
Alexander Duycka99955f2009-11-12 18:37:19 +00003081 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003082 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3083 /*
3084 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3085 * multicast packets
3086 */
3087 if (vfn <= adapter->vfs_allocated_count)
3088 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3089
3090 wr32(E1000_VMOLR(vfn), vmolr);
3091}
3092
Alexander Duycke1739522009-02-19 20:39:44 -08003093/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003094 * igb_configure_rx_ring - Configure a receive ring after Reset
3095 * @adapter: board private structure
3096 * @ring: receive ring to be configured
3097 *
3098 * Configure the Rx unit of the MAC after a reset.
3099 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003100void igb_configure_rx_ring(struct igb_adapter *adapter,
3101 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003102{
3103 struct e1000_hw *hw = &adapter->hw;
3104 u64 rdba = ring->dma;
3105 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003106 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003107
3108 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003109 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003110
3111 /* Set DMA base address registers */
3112 wr32(E1000_RDBAL(reg_idx),
3113 rdba & 0x00000000ffffffffULL);
3114 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3115 wr32(E1000_RDLEN(reg_idx),
3116 ring->count * sizeof(union e1000_adv_rx_desc));
3117
3118 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003119 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003120 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003121 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003122
Alexander Duyck952f72a2009-10-27 15:51:07 +00003123 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003124 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003125#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003126 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003127#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003128 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003129#endif
Alexander Duyck44390ca2011-08-26 07:43:38 +00003130 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Alexander Duyck06218a82011-08-26 07:46:55 +00003131 if (hw->mac.type >= e1000_82580)
Nick Nunley757b77e2010-03-26 11:36:47 +00003132 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003133 /* Only set Drop Enable if we are supporting multiple queues */
3134 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3135 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003136
3137 wr32(E1000_SRRCTL(reg_idx), srrctl);
3138
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003139 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003140 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003141
Alexander Duyck85b430b2009-10-27 15:50:29 +00003142 rxdctl |= IGB_RX_PTHRESH;
3143 rxdctl |= IGB_RX_HTHRESH << 8;
3144 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003145
3146 /* enable receive descriptor fetching */
3147 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003148 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3149}
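
/*
 * Buffer layout sketch (illustrative, assuming 4K pages): SRRCTL above
 * advertises an IGB_RX_HDR_LEN header buffer plus a half-page (2KB)
 * packet buffer per descriptor, and DESCTYPE_HDR_SPLIT_ALWAYS asks the
 * hardware to place protocol headers in the first buffer, so small
 * frames may never touch the page half at all.
 */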
3150
3151/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003152 * igb_configure_rx - Configure receive Unit after Reset
3153 * @adapter: board private structure
3154 *
3155 * Configure the Rx unit of the MAC after a reset.
3156 **/
3157static void igb_configure_rx(struct igb_adapter *adapter)
3158{
Hannes Eder91075842009-02-18 19:36:04 -08003159 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003160
Alexander Duyck68d480c2009-10-05 06:33:08 +00003161 /* set UTA to appropriate mode */
3162 igb_set_uta(adapter);
3163
Alexander Duyck26ad9172009-10-05 06:32:49 +00003164 /* set the correct pool for the PF default MAC address in entry 0 */
3165 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3166 adapter->vfs_allocated_count);
3167
Alexander Duyck06cf2662009-10-27 15:53:25 +00003168	/* Set up the HW Rx Head and Tail Descriptor Pointers and
3169 * the Base and Length of the Rx Descriptor Ring */
3170 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003171 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003172}
3173
3174/**
3175 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003176 * @tx_ring: Tx descriptor ring for a specific queue
3177 *
3178 * Free all transmit software resources
3179 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003180void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003181{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003182 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003183
Alexander Duyck06034642011-08-26 07:44:22 +00003184 vfree(tx_ring->tx_buffer_info);
3185 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003186
Alexander Duyck439705e2009-10-27 23:49:20 +00003187 /* if not set, then don't free */
3188 if (!tx_ring->desc)
3189 return;
3190
Alexander Duyck59d71982010-04-27 13:09:25 +00003191 dma_free_coherent(tx_ring->dev, tx_ring->size,
3192 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003193
3194 tx_ring->desc = NULL;
3195}
3196
3197/**
3198 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3199 * @adapter: board private structure
3200 *
3201 * Free all transmit software resources
3202 **/
3203static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3204{
3205 int i;
3206
3207 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003208 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003209}
3210
Alexander Duyckebe42d12011-08-26 07:45:09 +00003211void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3212 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003213{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003214 if (tx_buffer->skb) {
3215 dev_kfree_skb_any(tx_buffer->skb);
3216 if (tx_buffer->dma)
3217 dma_unmap_single(ring->dev,
3218 tx_buffer->dma,
3219 tx_buffer->length,
3220 DMA_TO_DEVICE);
3221 } else if (tx_buffer->dma) {
3222 dma_unmap_page(ring->dev,
3223 tx_buffer->dma,
3224 tx_buffer->length,
3225 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003226 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003227 tx_buffer->next_to_watch = NULL;
3228 tx_buffer->skb = NULL;
3229 tx_buffer->dma = 0;
3230 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003231}
3232
3233/**
3234 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003235 * @tx_ring: ring to be cleaned
3236 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003237static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003238{
Alexander Duyck06034642011-08-26 07:44:22 +00003239 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003240 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003241 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003242
Alexander Duyck06034642011-08-26 07:44:22 +00003243 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003244 return;
3245 /* Free all the Tx ring sk_buffs */
3246
3247 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003248 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003249 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003250 }
3251
Alexander Duyck06034642011-08-26 07:44:22 +00003252 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3253 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003254
3255 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003256 memset(tx_ring->desc, 0, tx_ring->size);
3257
3258 tx_ring->next_to_use = 0;
3259 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003260}
3261
3262/**
3263 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3264 * @adapter: board private structure
3265 **/
3266static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3267{
3268 int i;
3269
3270 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003271 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003272}
3273
3274/**
3275 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003276 * @rx_ring: ring to clean the resources from
3277 *
3278 * Free all receive software resources
3279 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003280void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003281{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003282 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003283
Alexander Duyck06034642011-08-26 07:44:22 +00003284 vfree(rx_ring->rx_buffer_info);
3285 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003286
Alexander Duyck439705e2009-10-27 23:49:20 +00003287 /* if not set, then don't free */
3288 if (!rx_ring->desc)
3289 return;
3290
Alexander Duyck59d71982010-04-27 13:09:25 +00003291 dma_free_coherent(rx_ring->dev, rx_ring->size,
3292 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003293
3294 rx_ring->desc = NULL;
3295}
3296
3297/**
3298 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3299 * @adapter: board private structure
3300 *
3301 * Free all receive software resources
3302 **/
3303static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3304{
3305 int i;
3306
3307 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003308 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003309}
3310
3311/**
3312 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003313 * @rx_ring: ring to free buffers from
3314 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003315static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003316{
Auke Kok9d5c8242008-01-24 02:22:38 -08003317 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003318 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003319
Alexander Duyck06034642011-08-26 07:44:22 +00003320 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003321 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003322
Auke Kok9d5c8242008-01-24 02:22:38 -08003323 /* Free all the Rx ring sk_buffs */
3324 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003325 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003326 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003327 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003328 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003329 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003330 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003331 buffer_info->dma = 0;
3332 }
3333
3334 if (buffer_info->skb) {
3335 dev_kfree_skb(buffer_info->skb);
3336 buffer_info->skb = NULL;
3337 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003338 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003339 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003340 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003341 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003342 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003343 buffer_info->page_dma = 0;
3344 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003345 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003346 put_page(buffer_info->page);
3347 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003348 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003349 }
3350 }
3351
Alexander Duyck06034642011-08-26 07:44:22 +00003352 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3353 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003354
3355 /* Zero out the descriptor ring */
3356 memset(rx_ring->desc, 0, rx_ring->size);
3357
3358 rx_ring->next_to_clean = 0;
3359 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003360}
3361
3362/**
3363 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3364 * @adapter: board private structure
3365 **/
3366static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3367{
3368 int i;
3369
3370 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003371 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003372}
3373
3374/**
3375 * igb_set_mac - Change the Ethernet Address of the NIC
3376 * @netdev: network interface device structure
3377 * @p: pointer to an address structure
3378 *
3379 * Returns 0 on success, negative on failure
3380 **/
3381static int igb_set_mac(struct net_device *netdev, void *p)
3382{
3383 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003384 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003385 struct sockaddr *addr = p;
3386
3387 if (!is_valid_ether_addr(addr->sa_data))
3388 return -EADDRNOTAVAIL;
3389
3390 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003391 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003392
Alexander Duyck26ad9172009-10-05 06:32:49 +00003393 /* set the correct pool for the new PF MAC address in entry 0 */
3394 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3395 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003396
Auke Kok9d5c8242008-01-24 02:22:38 -08003397 return 0;
3398}
3399
3400/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003401 * igb_write_mc_addr_list - write multicast addresses to MTA
3402 * @netdev: network interface device structure
3403 *
3404 * Writes multicast address list to the MTA hash table.
3405 * Returns: -ENOMEM on failure
3406 * 0 on no addresses written
3407 * X on writing X addresses to MTA
3408 **/
3409static int igb_write_mc_addr_list(struct net_device *netdev)
3410{
3411 struct igb_adapter *adapter = netdev_priv(netdev);
3412 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003413 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003414 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003415 int i;
3416
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003417 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003418 /* nothing to program, so clear mc list */
3419 igb_update_mc_addr_list(hw, NULL, 0);
3420 igb_restore_vf_multicasts(adapter);
3421 return 0;
3422 }
3423
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003424	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003425 if (!mta_list)
3426 return -ENOMEM;
3427
Alexander Duyck68d480c2009-10-05 06:33:08 +00003428 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003429 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003430 netdev_for_each_mc_addr(ha, netdev)
3431 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003432
Alexander Duyck68d480c2009-10-05 06:33:08 +00003433 igb_update_mc_addr_list(hw, mta_list, i);
3434 kfree(mta_list);
3435
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003436 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003437}
3438
3439/**
3440 * igb_write_uc_addr_list - write unicast addresses to RAR table
3441 * @netdev: network interface device structure
3442 *
3443 * Writes unicast address list to the RAR table.
3444 * Returns: -ENOMEM on failure/insufficient address space
3445 * 0 on no addresses written
3446 * X on writing X addresses to the RAR table
3447 **/
3448static int igb_write_uc_addr_list(struct net_device *netdev)
3449{
3450 struct igb_adapter *adapter = netdev_priv(netdev);
3451 struct e1000_hw *hw = &adapter->hw;
3452 unsigned int vfn = adapter->vfs_allocated_count;
3453 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3454 int count = 0;
3455
3456 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003457 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003458 return -ENOMEM;
3459
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003460 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003461 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003462
3463 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003464 if (!rar_entries)
3465 break;
3466 igb_rar_set_qsel(adapter, ha->addr,
3467 rar_entries--,
3468 vfn);
3469 count++;
3470 }
3471 }
3472 /* write the addresses in reverse order to avoid write combining */
3473 for (; rar_entries > 0 ; rar_entries--) {
3474 wr32(E1000_RAH(rar_entries), 0);
3475 wr32(E1000_RAL(rar_entries), 0);
3476 }
3477 wrfl();
3478
3479 return count;
3480}
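
/*
 * Budget sketch (illustrative): an 82576 exposes 24 receive address
 * registers; with 7 VFs, vfn + 1 = 8 entries are reserved for the VF
 * and PF MAC addresses, leaving 16 slots for extra unicast filters
 * before igb_set_rx_mode() below must fall back to unicast
 * promiscuous mode.
 */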
3481
3482/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003483 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003484 * @netdev: network interface device structure
3485 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003486 * The set_rx_mode entry point is called whenever the unicast or multicast
3487 * address lists or the network interface flags are updated. This routine is
3488 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003489 * promiscuous mode, and all-multi behavior.
3490 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003491static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003492{
3493 struct igb_adapter *adapter = netdev_priv(netdev);
3494 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003495 unsigned int vfn = adapter->vfs_allocated_count;
3496 u32 rctl, vmolr = 0;
3497 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003498
3499 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003500 rctl = rd32(E1000_RCTL);
3501
Alexander Duyck68d480c2009-10-05 06:33:08 +00003502	/* clear the affected bits */
3503 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3504
Patrick McHardy746b9f02008-07-16 20:15:45 -07003505 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003506 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003507 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003508 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003509 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003510 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003511 vmolr |= E1000_VMOLR_MPME;
3512 } else {
3513 /*
 3514	 * Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003515 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003516 * that we can at least receive multicast traffic
3517 */
3518 count = igb_write_mc_addr_list(netdev);
3519 if (count < 0) {
3520 rctl |= E1000_RCTL_MPE;
3521 vmolr |= E1000_VMOLR_MPME;
3522 } else if (count) {
3523 vmolr |= E1000_VMOLR_ROMPE;
3524 }
3525 }
3526 /*
 3527	 * Write addresses to available RAR registers; if there is not
3528 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003529 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003530 */
3531 count = igb_write_uc_addr_list(netdev);
3532 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003533 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003534 vmolr |= E1000_VMOLR_ROPE;
3535 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003536 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003537 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003538 wr32(E1000_RCTL, rctl);
3539
Alexander Duyck68d480c2009-10-05 06:33:08 +00003540 /*
3541 * In order to support SR-IOV and eventually VMDq it is necessary to set
3542 * the VMOLR to enable the appropriate modes. Without this workaround
3543 * we will have issues with VLAN tag stripping not being done for frames
3544 * that are only arriving because we are the default pool
3545 */
3546 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003547 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003548
Alexander Duyck68d480c2009-10-05 06:33:08 +00003549 vmolr |= rd32(E1000_VMOLR(vfn)) &
3550 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3551 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003552 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003553}
3554
Greg Rose13800462010-11-06 02:08:26 +00003555static void igb_check_wvbr(struct igb_adapter *adapter)
3556{
3557 struct e1000_hw *hw = &adapter->hw;
3558 u32 wvbr = 0;
3559
3560 switch (hw->mac.type) {
3561 case e1000_82576:
3562 case e1000_i350:
 3563		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
 3564			return;
3565 break;
3566 default:
3567 break;
3568 }
3569
3570 adapter->wvbr |= wvbr;
3571}
3572
3573#define IGB_STAGGERED_QUEUE_OFFSET 8
3574
3575static void igb_spoof_check(struct igb_adapter *adapter)
3576{
3577 int j;
3578
3579 if (!adapter->wvbr)
3580 return;
3581
 3582	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3583 if (adapter->wvbr & (1 << j) ||
3584 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3585 dev_warn(&adapter->pdev->dev,
3586 "Spoof event(s) detected on VF %d\n", j);
3587 adapter->wvbr &=
3588 ~((1 << j) |
3589 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3590 }
3591 }
3592}
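
/*
 * Bit layout example (illustrative): VF j reports a spoofed packet on
 * its first queue via WVBR bit j and on its staggered second queue via
 * bit (j + IGB_STAGGERED_QUEUE_OFFSET); e.g. wvbr = 0x101 flags VF 0
 * on both of its queues, and both bits are cleared once the warning
 * has been logged.
 */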
3593
Auke Kok9d5c8242008-01-24 02:22:38 -08003594/* Need to wait a few seconds after link up to get diagnostic information from
 3595	 * the PHY */
3596static void igb_update_phy_info(unsigned long data)
3597{
3598 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003599 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003600}
3601
3602/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003603 * igb_has_link - check shared code for link and determine up/down
3604 * @adapter: pointer to driver private info
3605 **/
Nick Nunley31455352010-02-17 01:01:21 +00003606bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003607{
3608 struct e1000_hw *hw = &adapter->hw;
3609 bool link_active = false;
3610 s32 ret_val = 0;
3611
3612 /* get_link_status is set on LSC (link status) interrupt or
 3613	 * rx sequence error interrupt. It stays set (and link_active
 3614	 * stays false) until e1000_check_for_link establishes link,
 3615	 * for copper adapters ONLY
3616 */
3617 switch (hw->phy.media_type) {
3618 case e1000_media_type_copper:
3619 if (hw->mac.get_link_status) {
3620 ret_val = hw->mac.ops.check_for_link(hw);
3621 link_active = !hw->mac.get_link_status;
3622 } else {
3623 link_active = true;
3624 }
3625 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003626 case e1000_media_type_internal_serdes:
3627 ret_val = hw->mac.ops.check_for_link(hw);
3628 link_active = hw->mac.serdes_has_link;
3629 break;
3630 default:
3631 case e1000_media_type_unknown:
3632 break;
3633 }
3634
3635 return link_active;
3636}
3637
Stefan Assmann563988d2011-04-05 04:27:15 +00003638static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3639{
3640 bool ret = false;
3641 u32 ctrl_ext, thstat;
3642
3643 /* check for thermal sensor event on i350, copper only */
3644 if (hw->mac.type == e1000_i350) {
3645 thstat = rd32(E1000_THSTAT);
3646 ctrl_ext = rd32(E1000_CTRL_EXT);
3647
3648 if ((hw->phy.media_type == e1000_media_type_copper) &&
3649 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3650 ret = !!(thstat & event);
3651 }
3652 }
3653
3654 return ret;
3655}
3656
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003657/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003658 * igb_watchdog - Timer Call-back
3659 * @data: pointer to adapter cast into an unsigned long
3660 **/
3661static void igb_watchdog(unsigned long data)
3662{
3663 struct igb_adapter *adapter = (struct igb_adapter *)data;
3664 /* Do the rest outside of interrupt context */
3665 schedule_work(&adapter->watchdog_task);
3666}
3667
3668static void igb_watchdog_task(struct work_struct *work)
3669{
3670 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003671 struct igb_adapter,
3672 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003673 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003674 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003675 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003676 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003677
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003678 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003679 if (link) {
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003680 /* Cancel scheduled suspend requests. */
3681 pm_runtime_resume(netdev->dev.parent);
3682
Auke Kok9d5c8242008-01-24 02:22:38 -08003683 if (!netif_carrier_ok(netdev)) {
3684 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003685 hw->mac.ops.get_speed_and_duplex(hw,
3686 &adapter->link_speed,
3687 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003688
3689 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003690			/* Link status message must follow this format */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003691 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3692 "Duplex, Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003693 netdev->name,
3694 adapter->link_speed,
3695 adapter->link_duplex == FULL_DUPLEX ?
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003696 "Full" : "Half",
3697 (ctrl & E1000_CTRL_TFCE) &&
3698 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3699 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3700 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
Auke Kok9d5c8242008-01-24 02:22:38 -08003701
Stefan Assmann563988d2011-04-05 04:27:15 +00003702 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003703 if (igb_thermal_sensor_event(hw,
3704 E1000_THSTAT_LINK_THROTTLE)) {
3705 netdev_info(netdev, "The network adapter link "
3706 "speed was downshifted because it "
3707 "overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003708 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003709
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003710 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003711 adapter->tx_timeout_factor = 1;
3712 switch (adapter->link_speed) {
3713 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003714 adapter->tx_timeout_factor = 14;
3715 break;
3716 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003717 /* maybe add some timeout factor ? */
3718 break;
3719 }
3720
3721 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003722
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003723 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003724 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003725
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003726 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003727 if (!test_bit(__IGB_DOWN, &adapter->state))
3728 mod_timer(&adapter->phy_info_timer,
3729 round_jiffies(jiffies + 2 * HZ));
3730 }
3731 } else {
3732 if (netif_carrier_ok(netdev)) {
3733 adapter->link_speed = 0;
3734 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003735
3736 /* check for thermal sensor event */
Jeff Kirsher876d2d62011-10-21 20:01:34 +00003737 if (igb_thermal_sensor_event(hw,
3738 E1000_THSTAT_PWR_DOWN)) {
3739 netdev_err(netdev, "The network adapter was "
3740 "stopped because it overheated\n");
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003741 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003742
Alexander Duyck527d47c2008-11-27 00:21:39 -08003743			/* Link status message must follow this format */
3744 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3745 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003746 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003747
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003748 igb_ping_all_vfs(adapter);
3749
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003750 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003751 if (!test_bit(__IGB_DOWN, &adapter->state))
3752 mod_timer(&adapter->phy_info_timer,
3753 round_jiffies(jiffies + 2 * HZ));
Yan, Zheng749ab2c2012-01-04 20:23:37 +00003754
3755 pm_schedule_suspend(netdev->dev.parent,
3756 MSEC_PER_SEC * 5);
Auke Kok9d5c8242008-01-24 02:22:38 -08003757 }
3758 }
3759
Eric Dumazet12dcd862010-10-15 17:27:10 +00003760 spin_lock(&adapter->stats64_lock);
3761 igb_update_stats(adapter, &adapter->stats64);
3762 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003763
Alexander Duyckdbabb062009-11-12 18:38:16 +00003764 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003765 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003766 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003767 /* We've lost link, so the controller stops DMA,
3768 * but we've got queued Tx work that's never going
3769 * to get done, so reset controller to flush Tx.
3770 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003771 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3772 adapter->tx_timeout_count++;
3773 schedule_work(&adapter->reset_task);
3774 /* return immediately since reset is imminent */
3775 return;
3776 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003777 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003778
Alexander Duyckdbabb062009-11-12 18:38:16 +00003779 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00003780 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00003781 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003782
Auke Kok9d5c8242008-01-24 02:22:38 -08003783 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003784 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003785 u32 eics = 0;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00003786 for (i = 0; i < adapter->num_q_vectors; i++)
3787 eics |= adapter->q_vector[i]->eims_value;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003788 wr32(E1000_EICS, eics);
3789 } else {
3790 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3791 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003792
Greg Rose13800462010-11-06 02:08:26 +00003793 igb_spoof_check(adapter);
3794
Auke Kok9d5c8242008-01-24 02:22:38 -08003795 /* Reset the timer */
3796 if (!test_bit(__IGB_DOWN, &adapter->state))
3797 mod_timer(&adapter->watchdog_timer,
3798 round_jiffies(jiffies + 2 * HZ));
3799}
3800
3801enum latency_range {
3802 lowest_latency = 0,
3803 low_latency = 1,
3804 bulk_latency = 2,
3805 latency_invalid = 255
3806};
3807
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003808/**
3809 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3810 *
 3811	 * Stores a new ITR value based strictly on packet size. This
3812 * algorithm is less sophisticated than that used in igb_update_itr,
3813 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003814 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003815 * were determined based on theoretical maximum wire speed and testing
3816 * data, in order to minimize response time while increasing bulk
3817 * throughput.
3818 * This functionality is controlled by the InterruptThrottleRate module
3819 * parameter (see igb_param.c)
3820 * NOTE: This function is called only when operating in a multiqueue
3821 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003822 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003823 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003824static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003825{
Alexander Duyck047e0032009-10-27 15:49:27 +00003826 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003827 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003828 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003829 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003830
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003831 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3832 * ints/sec - i.e. an ITR value of IGB_4K_ITR.
3833 */
3834 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003835 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003836 goto set_itr_val;
3837 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003838
Alexander Duyck0ba82992011-08-26 07:45:47 +00003839 packets = q_vector->rx.total_packets;
3840 if (packets)
3841 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003842
Alexander Duyck0ba82992011-08-26 07:45:47 +00003843 packets = q_vector->tx.total_packets;
3844 if (packets)
3845 avg_wire_size = max_t(u32, avg_wire_size,
3846 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003847
3848 /* if avg_wire_size isn't set no work was done */
3849 if (!avg_wire_size)
3850 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003851
3852 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3853 avg_wire_size += 24;
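	/* (7-byte preamble + 1-byte SFD + 12-byte IFG + 4-byte CRC) */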
3854
3855 /* Don't starve jumbo frames */
3856 avg_wire_size = min(avg_wire_size, 3000);
3857
3858 /* Give a little boost to mid-size frames */
3859 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3860 new_val = avg_wire_size / 3;
3861 else
3862 new_val = avg_wire_size / 2;
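	/* a rough sketch of the units, assuming the ~0.25 usec EITR
	 * granularity of this family: avg_wire_size = 600 gives
	 * 600 / 3 = 200, i.e. ~50 usec between interrupts, or about
	 * 20000 ints/sec */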
3863
Alexander Duyck0ba82992011-08-26 07:45:47 +00003864 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3865 if (new_val < IGB_20K_ITR &&
3866 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3867 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3868 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003869
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003870set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003871 if (new_val != q_vector->itr_val) {
3872 q_vector->itr_val = new_val;
3873 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003874 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003875clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003876 q_vector->rx.total_bytes = 0;
3877 q_vector->rx.total_packets = 0;
3878 q_vector->tx.total_bytes = 0;
3879 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003880}
3881
3882/**
3883 * igb_update_itr - update the dynamic ITR value based on statistics
3884 * Stores a new ITR value based on packets and byte
3885 * counts during the last interrupt. The advantage of per interrupt
3886 * computation is faster updates and more accurate ITR for the current
3887 * traffic pattern. Constants in this function were computed
3888 * based on theoretical maximum wire speed and thresholds were set based
3889 * on testing data as well as attempting to minimize response time
3890 * while increasing bulk throughput.
3891 * This functionality is controlled by the InterruptThrottleRate module
3892 * parameter (see igb_param.c)
3893 * NOTE: These calculations are only valid when operating in a single-
3894 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003895 * @q_vector: pointer to q_vector
3896 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003897 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003898static void igb_update_itr(struct igb_q_vector *q_vector,
3899 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003900{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003901 unsigned int packets = ring_container->total_packets;
3902 unsigned int bytes = ring_container->total_bytes;
3903 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003904
Alexander Duyck0ba82992011-08-26 07:45:47 +00003905 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003906 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003907 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003908
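	/* latency state machine: small, slow traffic steps the ring
	 * toward lowest_latency, while large or TSO-heavy traffic
	 * (over 8000 bytes/packet) steps it toward bulk_latency */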
Alexander Duyck0ba82992011-08-26 07:45:47 +00003909 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003910 case lowest_latency:
3911 /* handle TSO and jumbo frames */
3912 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003913 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003914 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003915 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003916 break;
3917 case low_latency: /* 50 usec aka 20000 ints/s */
3918 if (bytes > 10000) {
3919 /* this if handles the TSO accounting */
3920 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003921 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003922 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003923 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003924 } else if ((packets > 35)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003925 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003926 }
3927 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003928 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003929 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003930 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003931 }
3932 break;
3933 case bulk_latency: /* 250 usec aka 4000 ints/s */
3934 if (bytes > 25000) {
3935 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003936 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003937 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003938 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003939 }
3940 break;
3941 }
3942
Alexander Duyck0ba82992011-08-26 07:45:47 +00003943 /* clear work counters since we have the values we need */
3944 ring_container->total_bytes = 0;
3945 ring_container->total_packets = 0;
3946
3947 /* write updated itr to ring container */
3948 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003949}
3950
Alexander Duyck0ba82992011-08-26 07:45:47 +00003951static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003952{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003953 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00003954 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003955 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003956
3957 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3958 if (adapter->link_speed != SPEED_1000) {
3959 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003960 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08003961 goto set_itr_now;
3962 }
3963
Alexander Duyck0ba82992011-08-26 07:45:47 +00003964 igb_update_itr(q_vector, &q_vector->tx);
3965 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08003966
Alexander Duyck0ba82992011-08-26 07:45:47 +00003967 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003968
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003969 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00003970 if (current_itr == lowest_latency &&
3971 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3972 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003973 current_itr = low_latency;
3974
Auke Kok9d5c8242008-01-24 02:22:38 -08003975 switch (current_itr) {
3976 /* counts and packets in update_itr are dependent on these numbers */
3977 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003978 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003979 break;
3980 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003981 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003982 break;
3983 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003984 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003985 break;
3986 default:
3987 break;
3988 }
3989
3990set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00003991 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003992 /* this attempts to bias the interrupt rate towards Bulk
3993 * by adding intermediate steps when interrupt rate is
3994 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003995 new_itr = new_itr > q_vector->itr_val ?
3996 max((new_itr * q_vector->itr_val) /
3997 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00003998 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003999 new_itr;
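		/* note that for new_itr > itr_val the weighted term
		 * new*old / (new + old/4) is always below new_itr, so
		 * the max() currently resolves to new_itr itself */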
4000 /* Don't write the value here; it resets the adapter's
4001 * internal timer, and causes us to delay far longer than
4002 * we should between interrupts. Instead, we write the ITR
4003 * value at the beginning of the next interrupt so the timing
4004 * ends up being correct.
4005 */
Alexander Duyck047e0032009-10-27 15:49:27 +00004006 q_vector->itr_val = new_itr;
4007 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004008 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004009}
4010
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00004011static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4012 u32 type_tucmd, u32 mss_l4len_idx)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004013{
4014 struct e1000_adv_tx_context_desc *context_desc;
4015 u16 i = tx_ring->next_to_use;
4016
4017 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4018
4019 i++;
4020 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4021
4022 /* set bits to identify this as an advanced context descriptor */
4023 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4024
4025 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004026 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004027 mss_l4len_idx |= tx_ring->reg_idx << 4;
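	/* the << 4 drops the ring's register index into the context
	 * descriptor's IDX field, giving each ring a distinct index */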
4028
4029 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4030 context_desc->seqnum_seed = 0;
4031 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4032 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4033}
4034
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004035static int igb_tso(struct igb_ring *tx_ring,
4036 struct igb_tx_buffer *first,
4037 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004038{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004039 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004040 u32 vlan_macip_lens, type_tucmd;
4041 u32 mss_l4len_idx, l4len;
4042
4043 if (!skb_is_gso(skb))
4044 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004045
4046 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004047 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004048 if (err)
4049 return err;
4050 }
4051
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004052 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4053 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004054
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004055 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004056 struct iphdr *iph = ip_hdr(skb);
4057 iph->tot_len = 0;
4058 iph->check = 0;
4059 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4060 iph->daddr, 0,
4061 IPPROTO_TCP,
4062 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004063 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004064 first->tx_flags |= IGB_TX_FLAGS_TSO |
4065 IGB_TX_FLAGS_CSUM |
4066 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004067 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004068 ipv6_hdr(skb)->payload_len = 0;
4069 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4070 &ipv6_hdr(skb)->daddr,
4071 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004072 first->tx_flags |= IGB_TX_FLAGS_TSO |
4073 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004074 }
4075
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004076 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004077 l4len = tcp_hdrlen(skb);
4078 *hdr_len = skb_transport_offset(skb) + l4len;
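	/* e.g. an untagged TCP/IPv4 frame: 14 (eth) + 20 (ip) + 20 (tcp)
	 * = 54 header bytes, replicated at the head of every segment */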
Auke Kok9d5c8242008-01-24 02:22:38 -08004079
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004080 /* update gso size and bytecount with header size */
4081 first->gso_segs = skb_shinfo(skb)->gso_segs;
4082 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4083
Auke Kok9d5c8242008-01-24 02:22:38 -08004084 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004085 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4086 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004087
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004088 /* VLAN MACLEN IPLEN */
4089 vlan_macip_lens = skb_network_header_len(skb);
4090 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004091 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004092
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004093 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004094
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004095 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004096}
4097
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004098static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004099{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004100 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004101 u32 vlan_macip_lens = 0;
4102 u32 mss_l4len_idx = 0;
4103 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004104
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004105 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004106 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4107 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004108 } else {
4109 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004110 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004111 case __constant_htons(ETH_P_IP):
4112 vlan_macip_lens |= skb_network_header_len(skb);
4113 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4114 l4_hdr = ip_hdr(skb)->protocol;
4115 break;
4116 case __constant_htons(ETH_P_IPV6):
4117 vlan_macip_lens |= skb_network_header_len(skb);
4118 l4_hdr = ipv6_hdr(skb)->nexthdr;
4119 break;
4120 default:
4121 if (unlikely(net_ratelimit())) {
4122 dev_warn(tx_ring->dev,
4123 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004124 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004125 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004126 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004127 }
4128
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004129 switch (l4_hdr) {
4130 case IPPROTO_TCP:
4131 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4132 mss_l4len_idx = tcp_hdrlen(skb) <<
4133 E1000_ADVTXD_L4LEN_SHIFT;
4134 break;
4135 case IPPROTO_SCTP:
4136 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4137 mss_l4len_idx = sizeof(struct sctphdr) <<
4138 E1000_ADVTXD_L4LEN_SHIFT;
4139 break;
4140 case IPPROTO_UDP:
4141 mss_l4len_idx = sizeof(struct udphdr) <<
4142 E1000_ADVTXD_L4LEN_SHIFT;
4143 break;
4144 default:
4145 if (unlikely(net_ratelimit())) {
4146 dev_warn(tx_ring->dev,
4147 "partial checksum but l4 proto=%x!\n",
4148 l4_hdr);
4149 }
4150 break;
4151 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004152
4153 /* update TX checksum flag */
4154 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004155 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004156
4157 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004158 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004159
4160 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004161}
4162
Alexander Duycke032afc2011-08-26 07:44:48 +00004163static __le32 igb_tx_cmd_type(u32 tx_flags)
4164{
4165 /* set type for advanced descriptor with frame checksum insertion */
4166 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4167 E1000_ADVTXD_DCMD_IFCS |
4168 E1000_ADVTXD_DCMD_DEXT);
4169
4170 /* set HW vlan bit if vlan is present */
4171 if (tx_flags & IGB_TX_FLAGS_VLAN)
4172 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4173
4174 /* set timestamp bit if present */
4175 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4176 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4177
4178 /* set segmentation bits for TSO */
4179 if (tx_flags & IGB_TX_FLAGS_TSO)
4180 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4181
4182 return cmd_type;
4183}
4184
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004185static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4186 union e1000_adv_tx_desc *tx_desc,
4187 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004188{
4189 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4190
4191 /* 82575 requires a unique index per ring if any offload is enabled */
4192 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
Alexander Duyck866cff02011-08-26 07:45:36 +00004193 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004194 olinfo_status |= tx_ring->reg_idx << 4;
4195
4196 /* insert L4 checksum */
4197 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4198 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4199
4200 /* insert IPv4 checksum */
4201 if (tx_flags & IGB_TX_FLAGS_IPV4)
4202 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4203 }
4204
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004205 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004206}
4207
Alexander Duyckebe42d12011-08-26 07:45:09 +00004208/*
4209 * The largest size we can write to the descriptor is 65535. In order to
4210 * maintain a power of two alignment we have to limit ourselves to 32K.
4211 */
4212#define IGB_MAX_TXD_PWR 15
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004213#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
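/* e.g. a 45K fragment is emitted as one 32K descriptor plus one 13K
 * descriptor by the splitting loop in igb_tx_map() below */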
Auke Kok9d5c8242008-01-24 02:22:38 -08004214
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004215static void igb_tx_map(struct igb_ring *tx_ring,
4216 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004217 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004218{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004219 struct sk_buff *skb = first->skb;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004220 struct igb_tx_buffer *tx_buffer_info;
4221 union e1000_adv_tx_desc *tx_desc;
4222 dma_addr_t dma;
4223 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4224 unsigned int data_len = skb->data_len;
4225 unsigned int size = skb_headlen(skb);
4226 unsigned int paylen = skb->len - hdr_len;
4227 __le32 cmd_type;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004228 u32 tx_flags = first->tx_flags;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004229 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004230
4231 tx_desc = IGB_TX_DESC(tx_ring, i);
4232
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004233 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004234 cmd_type = igb_tx_cmd_type(tx_flags);
4235
4236 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4237 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004238 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004239
Alexander Duyckebe42d12011-08-26 07:45:09 +00004240 /* record length, and DMA address */
4241 first->length = size;
4242 first->dma = dma;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004243 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004244
Alexander Duyckebe42d12011-08-26 07:45:09 +00004245 for (;;) {
4246 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4247 tx_desc->read.cmd_type_len =
4248 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004249
Alexander Duyckebe42d12011-08-26 07:45:09 +00004250 i++;
4251 tx_desc++;
4252 if (i == tx_ring->count) {
4253 tx_desc = IGB_TX_DESC(tx_ring, 0);
4254 i = 0;
4255 }
4256
4257 dma += IGB_MAX_DATA_PER_TXD;
4258 size -= IGB_MAX_DATA_PER_TXD;
4259
4260 tx_desc->read.olinfo_status = 0;
4261 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4262 }
4263
4264 if (likely(!data_len))
4265 break;
4266
4267 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4268
Alexander Duyck65689fe2009-03-20 00:17:43 +00004269 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004270 tx_desc++;
4271 if (i == tx_ring->count) {
4272 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004273 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004274 }
Alexander Duyck65689fe2009-03-20 00:17:43 +00004275
Eric Dumazet9e903e02011-10-18 21:00:24 +00004276 size = skb_frag_size(frag);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004277 data_len -= size;
4278
4279 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4280 size, DMA_TO_DEVICE);
4281 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004282 goto dma_error;
4283
Alexander Duyckebe42d12011-08-26 07:45:09 +00004284 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4285 tx_buffer_info->length = size;
4286 tx_buffer_info->dma = dma;
4287
4288 tx_desc->read.olinfo_status = 0;
4289 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4290
4291 frag++;
Auke Kok9d5c8242008-01-24 02:22:38 -08004292 }
4293
Eric Dumazetbdbc0632012-01-04 20:23:36 +00004294 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4295
Alexander Duyckebe42d12011-08-26 07:45:09 +00004296 /* write last descriptor with RS and EOP bits */
4297 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
Ben Greear6b8f0922012-03-06 09:41:53 +00004298 if (unlikely(skb->no_fcs))
4299 cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
Alexander Duyckebe42d12011-08-26 07:45:09 +00004300 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyck8542db02011-08-26 07:44:43 +00004301
4302 /* set the timestamp */
4303 first->time_stamp = jiffies;
4304
Alexander Duyckebe42d12011-08-26 07:45:09 +00004305 /*
4306 * Force memory writes to complete before letting h/w know there
4307 * are new descriptors to fetch. (Only applicable for weak-ordered
4308 * memory model archs, such as IA-64).
4309 *
4310 * We also need this memory barrier to make certain all of the
4311 * status bits have been updated before next_to_watch is written.
4312 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004313 wmb();
4314
Alexander Duyckebe42d12011-08-26 07:45:09 +00004315 /* set next_to_watch value indicating a packet is present */
4316 first->next_to_watch = tx_desc;
4317
4318 i++;
4319 if (i == tx_ring->count)
4320 i = 0;
4321
Auke Kok9d5c8242008-01-24 02:22:38 -08004322 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004323
Alexander Duyckfce99e32009-10-27 15:51:27 +00004324 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004325
Auke Kok9d5c8242008-01-24 02:22:38 -08004326 /* we need this if more than one processor can write to our tail
4327 * at a time, it synchronizes IO on IA64/Altix systems */
4328 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004329
4330 return;
4331
4332dma_error:
4333 dev_err(tx_ring->dev, "TX DMA map failed\n");
4334
4335 /* clear dma mappings for failed tx_buffer_info map */
4336 for (;;) {
4337 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4338 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4339 if (tx_buffer_info == first)
4340 break;
4341 if (i == 0)
4342 i = tx_ring->count;
4343 i--;
4344 }
4345
4346 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004347}
4348
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004349static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004350{
Alexander Duycke694e962009-10-27 15:53:06 +00004351 struct net_device *netdev = tx_ring->netdev;
4352
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004353 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004354
Auke Kok9d5c8242008-01-24 02:22:38 -08004355 /* Herbert's original patch had:
4356 * smp_mb__after_netif_stop_queue();
4357 * but since that doesn't exist yet, just open code it. */
4358 smp_mb();
4359
4360 /* We need to check again in case another CPU has just
4361 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004362 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004363 return -EBUSY;
4364
4365 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004366 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004367
4368 u64_stats_update_begin(&tx_ring->tx_syncp2);
4369 tx_ring->tx_stats.restart_queue2++;
4370 u64_stats_update_end(&tx_ring->tx_syncp2);
4371
Auke Kok9d5c8242008-01-24 02:22:38 -08004372 return 0;
4373}
4374
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004375static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004376{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004377 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004378 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004379 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004380}
4381
Alexander Duyckcd392f52011-08-26 07:43:59 +00004382netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4383 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004384{
Alexander Duyck8542db02011-08-26 07:44:43 +00004385 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004386 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004387 u32 tx_flags = 0;
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004388 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004389 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004390
Auke Kok9d5c8242008-01-24 02:22:38 -08004391 /* need: 1 descriptor per page,
4392 * + 2 desc gap to keep tail from touching head,
4393 * + 1 desc for skb->data,
4394 * + 1 desc for context descriptor,
4395 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004396 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004397 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004398 return NETDEV_TX_BUSY;
4399 }
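	/* e.g. an skb with 3 page fragments reserves 3 + 4 = 7 slots */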
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004400
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004401 /* record the location of the first descriptor for this packet */
4402 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4403 first->skb = skb;
4404 first->bytecount = skb->len;
4405 first->gso_segs = 1;
4406
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004407 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4408 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004409 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004410 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004411
Jesse Grosseab6d182010-10-20 13:56:03 +00004412 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004413 tx_flags |= IGB_TX_FLAGS_VLAN;
4414 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4415 }
4416
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004417 /* record initial flags and protocol */
4418 first->tx_flags = tx_flags;
4419 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004420
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004421 tso = igb_tso(tx_ring, first, &hdr_len);
4422 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004423 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004424 else if (!tso)
4425 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004426
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004427 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004428
4429 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004430 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004431
Auke Kok9d5c8242008-01-24 02:22:38 -08004432 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004433
4434out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004435 igb_unmap_and_free_tx_resource(tx_ring, first);
4436
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004437 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004438}
4439
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004440static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4441 struct sk_buff *skb)
4442{
4443 unsigned int r_idx = skb->queue_mapping;
4444
4445 if (r_idx >= adapter->num_tx_queues)
4446 r_idx = r_idx % adapter->num_tx_queues;
4447
4448 return adapter->tx_ring[r_idx];
4449}
4450
Alexander Duyckcd392f52011-08-26 07:43:59 +00004451static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4452 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004453{
4454 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004455
4456 if (test_bit(__IGB_DOWN, &adapter->state)) {
4457 dev_kfree_skb_any(skb);
4458 return NETDEV_TX_OK;
4459 }
4460
4461 if (skb->len <= 0) {
4462 dev_kfree_skb_any(skb);
4463 return NETDEV_TX_OK;
4464 }
4465
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004466 /*
4467 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4468 * in order to meet this minimum size requirement.
4469 */
4470 if (skb->len < 17) {
4471 if (skb_padto(skb, 17))
4472 return NETDEV_TX_OK;
4473 skb->len = 17;
4474 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004475
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004476 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004477}
4478
4479/**
4480 * igb_tx_timeout - Respond to a Tx Hang
4481 * @netdev: network interface device structure
4482 **/
4483static void igb_tx_timeout(struct net_device *netdev)
4484{
4485 struct igb_adapter *adapter = netdev_priv(netdev);
4486 struct e1000_hw *hw = &adapter->hw;
4487
4488 /* Do the reset outside of interrupt context */
4489 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004490
Alexander Duyck06218a82011-08-26 07:46:55 +00004491 if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00004492 hw->dev_spec._82575.global_device_reset = true;
4493
Auke Kok9d5c8242008-01-24 02:22:38 -08004494 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004495 wr32(E1000_EICS,
4496 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004497}
4498
4499static void igb_reset_task(struct work_struct *work)
4500{
4501 struct igb_adapter *adapter;
4502 adapter = container_of(work, struct igb_adapter, reset_task);
4503
Taku Izumic97ec422010-04-27 14:39:30 +00004504 igb_dump(adapter);
4505 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004506 igb_reinit_locked(adapter);
4507}
4508
4509/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004510 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004511 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004512 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004513 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004514 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004515static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4516 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004517{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004518 struct igb_adapter *adapter = netdev_priv(netdev);
4519
4520 spin_lock(&adapter->stats64_lock);
4521 igb_update_stats(adapter, &adapter->stats64);
4522 memcpy(stats, &adapter->stats64, sizeof(*stats));
4523 spin_unlock(&adapter->stats64_lock);
4524
4525 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004526}
4527
4528/**
4529 * igb_change_mtu - Change the Maximum Transfer Unit
4530 * @netdev: network interface device structure
4531 * @new_mtu: new value for maximum frame size
4532 *
4533 * Returns 0 on success, negative on failure
4534 **/
4535static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4536{
4537 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004538 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004539 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
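	/* e.g. the default 1500-byte MTU yields 1500 + 14 + 4 + 4 = 1522 */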
Auke Kok9d5c8242008-01-24 02:22:38 -08004540
Alexander Duyckc809d222009-10-27 23:52:13 +00004541 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004542 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004543 return -EINVAL;
4544 }
4545
Alexander Duyck153285f2011-08-26 07:43:32 +00004546#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004547 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004548 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004549 return -EINVAL;
4550 }
4551
4552 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4553 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004554
Auke Kok9d5c8242008-01-24 02:22:38 -08004555 /* igb_down has a dependency on max_frame_size */
4556 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004557
Alexander Duyck4c844852009-10-27 15:52:07 +00004558 if (netif_running(netdev))
4559 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004560
Alexander Duyck090b1792009-10-27 23:51:55 +00004561 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004562 netdev->mtu, new_mtu);
4563 netdev->mtu = new_mtu;
4564
4565 if (netif_running(netdev))
4566 igb_up(adapter);
4567 else
4568 igb_reset(adapter);
4569
4570 clear_bit(__IGB_RESETTING, &adapter->state);
4571
4572 return 0;
4573}
4574
4575/**
4576 * igb_update_stats - Update the board statistics counters
4577 * @adapter: board private structure
4578 **/
4579
Eric Dumazet12dcd862010-10-15 17:27:10 +00004580void igb_update_stats(struct igb_adapter *adapter,
4581 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004582{
4583 struct e1000_hw *hw = &adapter->hw;
4584 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004585 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004586 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004587 int i;
4588 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004589 unsigned int start;
4590 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004591
4592#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4593
4594 /*
4595 * Prevent stats update while adapter is being reset, or if the pci
4596 * connection is down.
4597 */
4598 if (adapter->link_speed == 0)
4599 return;
4600 if (pci_channel_offline(pdev))
4601 return;
4602
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004603 bytes = 0;
4604 packets = 0;
4605 for (i = 0; i < adapter->num_rx_queues; i++) {
4606 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00004607 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004608
Alexander Duyck3025a442010-02-17 01:02:39 +00004609 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004610 net_stats->rx_fifo_errors += rqdpc_tmp;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004611
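		/* u64_stats seqcount loop: re-read if a writer updated
		 * the counters meanwhile, so the 64-bit bytes/packets
		 * pair stays consistent even on 32-bit hosts */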
4612 do {
4613 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4614 _bytes = ring->rx_stats.bytes;
4615 _packets = ring->rx_stats.packets;
4616 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4617 bytes += _bytes;
4618 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004619 }
4620
Alexander Duyck128e45e2009-11-12 18:37:38 +00004621 net_stats->rx_bytes = bytes;
4622 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004623
4624 bytes = 0;
4625 packets = 0;
4626 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004627 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004628 do {
4629 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4630 _bytes = ring->tx_stats.bytes;
4631 _packets = ring->tx_stats.packets;
4632 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4633 bytes += _bytes;
4634 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004635 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004636 net_stats->tx_bytes = bytes;
4637 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004638
4639 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004640 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4641 adapter->stats.gprc += rd32(E1000_GPRC);
4642 adapter->stats.gorc += rd32(E1000_GORCL);
4643 rd32(E1000_GORCH); /* clear GORCL */
4644 adapter->stats.bprc += rd32(E1000_BPRC);
4645 adapter->stats.mprc += rd32(E1000_MPRC);
4646 adapter->stats.roc += rd32(E1000_ROC);
4647
4648 adapter->stats.prc64 += rd32(E1000_PRC64);
4649 adapter->stats.prc127 += rd32(E1000_PRC127);
4650 adapter->stats.prc255 += rd32(E1000_PRC255);
4651 adapter->stats.prc511 += rd32(E1000_PRC511);
4652 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4653 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4654 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4655 adapter->stats.sec += rd32(E1000_SEC);
4656
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004657 mpc = rd32(E1000_MPC);
4658 adapter->stats.mpc += mpc;
4659 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004660 adapter->stats.scc += rd32(E1000_SCC);
4661 adapter->stats.ecol += rd32(E1000_ECOL);
4662 adapter->stats.mcc += rd32(E1000_MCC);
4663 adapter->stats.latecol += rd32(E1000_LATECOL);
4664 adapter->stats.dc += rd32(E1000_DC);
4665 adapter->stats.rlec += rd32(E1000_RLEC);
4666 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4667 adapter->stats.xontxc += rd32(E1000_XONTXC);
4668 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4669 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4670 adapter->stats.fcruc += rd32(E1000_FCRUC);
4671 adapter->stats.gptc += rd32(E1000_GPTC);
4672 adapter->stats.gotc += rd32(E1000_GOTCL);
4673 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004674 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004675 adapter->stats.ruc += rd32(E1000_RUC);
4676 adapter->stats.rfc += rd32(E1000_RFC);
4677 adapter->stats.rjc += rd32(E1000_RJC);
4678 adapter->stats.tor += rd32(E1000_TORH);
4679 adapter->stats.tot += rd32(E1000_TOTH);
4680 adapter->stats.tpr += rd32(E1000_TPR);
4681
4682 adapter->stats.ptc64 += rd32(E1000_PTC64);
4683 adapter->stats.ptc127 += rd32(E1000_PTC127);
4684 adapter->stats.ptc255 += rd32(E1000_PTC255);
4685 adapter->stats.ptc511 += rd32(E1000_PTC511);
4686 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4687 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4688
4689 adapter->stats.mptc += rd32(E1000_MPTC);
4690 adapter->stats.bptc += rd32(E1000_BPTC);
4691
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004692 adapter->stats.tpt += rd32(E1000_TPT);
4693 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004694
4695 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004696 /* read internal phy specific stats */
4697 reg = rd32(E1000_CTRL_EXT);
4698 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4699 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4700 adapter->stats.tncrs += rd32(E1000_TNCRS);
4701 }
4702
Auke Kok9d5c8242008-01-24 02:22:38 -08004703 adapter->stats.tsctc += rd32(E1000_TSCTC);
4704 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4705
4706 adapter->stats.iac += rd32(E1000_IAC);
4707 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4708 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4709 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4710 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4711 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4712 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4713 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4714 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4715
4716 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004717 net_stats->multicast = adapter->stats.mprc;
4718 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004719
4720 /* Rx Errors */
4721
4722 /* RLEC on some newer hardware can be incorrect, so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004723 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004724 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004725 adapter->stats.crcerrs + adapter->stats.algnerrc +
4726 adapter->stats.ruc + adapter->stats.roc +
4727 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004728 net_stats->rx_length_errors = adapter->stats.ruc +
4729 adapter->stats.roc;
4730 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4731 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4732 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004733
4734 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004735 net_stats->tx_errors = adapter->stats.ecol +
4736 adapter->stats.latecol;
4737 net_stats->tx_aborted_errors = adapter->stats.ecol;
4738 net_stats->tx_window_errors = adapter->stats.latecol;
4739 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004740
4741 /* Tx Dropped needs to be maintained elsewhere */
4742
4743 /* Phy Stats */
4744 if (hw->phy.media_type == e1000_media_type_copper) {
4745 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004746 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004747 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4748 adapter->phy_stats.idle_errors += phy_tmp;
4749 }
4750 }
4751
4752 /* Management Stats */
4753 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4754 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4755 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004756
4757 /* OS2BMC Stats */
4758 reg = rd32(E1000_MANC);
4759 if (reg & E1000_MANC_EN_BMC2OS) {
4760 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4761 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4762 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4763 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4764 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004765}
4766
Auke Kok9d5c8242008-01-24 02:22:38 -08004767static irqreturn_t igb_msix_other(int irq, void *data)
4768{
Alexander Duyck047e0032009-10-27 15:49:27 +00004769 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004770 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004771 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004772 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004773
Alexander Duyck7f081d42010-01-07 17:41:00 +00004774 if (icr & E1000_ICR_DRSTA)
4775 schedule_work(&adapter->reset_task);
4776
Alexander Duyck047e0032009-10-27 15:49:27 +00004777 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004778 /* HW is reporting DMA is out of sync */
4779 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004780 /* The DMA Out of Sync is also an indication of a spoof event
4781 * in IOV mode. Check the Wrong VM Behavior register to
4782 * see if it is really a spoof event. */
4783 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004784 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004785
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004786 /* Check for a mailbox event */
4787 if (icr & E1000_ICR_VMMB)
4788 igb_msg_task(adapter);
4789
4790 if (icr & E1000_ICR_LSC) {
4791 hw->mac.get_link_status = 1;
4792 /* guard against interrupt when we're going down */
4793 if (!test_bit(__IGB_DOWN, &adapter->state))
4794 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4795 }
4796
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004797 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004798
4799 return IRQ_HANDLED;
4800}
4801
Alexander Duyck047e0032009-10-27 15:49:27 +00004802static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004803{
Alexander Duyck26b39272010-02-17 01:00:41 +00004804 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004805 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004806
Alexander Duyck047e0032009-10-27 15:49:27 +00004807 if (!q_vector->set_itr)
4808 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004809
Alexander Duyck047e0032009-10-27 15:49:27 +00004810 if (!itr_val)
4811 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004812
Alexander Duyck26b39272010-02-17 01:00:41 +00004813 if (adapter->hw.mac.type == e1000_82575)
4814 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004815 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004816 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004817
4818 writel(itr_val, q_vector->itr_register);
4819 q_vector->set_itr = 0;
4820}
4821
4822static irqreturn_t igb_msix_ring(int irq, void *data)
4823{
4824 struct igb_q_vector *q_vector = data;
4825
4826 /* Write the ITR value calculated from the previous interrupt. */
4827 igb_write_itr(q_vector);
4828
4829 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004830
Auke Kok9d5c8242008-01-24 02:22:38 -08004831 return IRQ_HANDLED;
4832}
4833
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004834#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004835static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004836{
Alexander Duyck047e0032009-10-27 15:49:27 +00004837 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004838 struct e1000_hw *hw = &adapter->hw;
4839 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004840
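	/* re-tag this vector's rings so the chipset steers descriptor
	 * writebacks into the cache of the CPU we are now running on */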
Alexander Duyck047e0032009-10-27 15:49:27 +00004841 if (q_vector->cpu == cpu)
4842 goto out_no_update;
4843
Alexander Duyck0ba82992011-08-26 07:45:47 +00004844 if (q_vector->tx.ring) {
4845 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004846 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4847 if (hw->mac.type == e1000_82575) {
4848 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4849 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4850 } else {
4851 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4852 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4853 E1000_DCA_TXCTRL_CPUID_SHIFT;
4854 }
4855 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4856 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4857 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004858 if (q_vector->rx.ring) {
4859 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004860 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4861 if (hw->mac.type == e1000_82575) {
4862 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4863 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4864 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004865 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004866 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004867 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004868 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004869 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4870 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4871 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4872 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004873 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004874 q_vector->cpu = cpu;
4875out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004876 put_cpu();
4877}
4878
4879static void igb_setup_dca(struct igb_adapter *adapter)
4880{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004881 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004882 int i;
4883
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004884 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004885 return;
4886
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004887 /* Always use CB2 mode, difference is masked in the CB driver. */
4888 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4889
Alexander Duyck047e0032009-10-27 15:49:27 +00004890 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004891 adapter->q_vector[i]->cpu = -1;
4892 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004893 }
4894}
4895
4896static int __igb_notify_dca(struct device *dev, void *data)
4897{
4898 struct net_device *netdev = dev_get_drvdata(dev);
4899 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004900 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004901 struct e1000_hw *hw = &adapter->hw;
4902 unsigned long event = *(unsigned long *)data;
4903
4904 switch (event) {
4905 case DCA_PROVIDER_ADD:
4906 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004907 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004908 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004909 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004910 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004911 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004912 igb_setup_dca(adapter);
4913 break;
4914 }
4915 /* Fall Through since DCA is disabled. */
4916 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004917 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004918 /* without this, a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004919 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004920 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004921 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004922 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004923 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004924 }
4925 break;
4926 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004927
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004928 return 0;
4929}
4930
4931static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4932 void *p)
4933{
4934 int ret_val;
4935
4936 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4937 __igb_notify_dca);
4938
4939 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4940}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004941#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004942
Greg Rose0224d662011-10-14 02:57:14 +00004943#ifdef CONFIG_PCI_IOV
4944static int igb_vf_configure(struct igb_adapter *adapter, int vf)
4945{
4946 unsigned char mac_addr[ETH_ALEN];
4947 struct pci_dev *pdev = adapter->pdev;
4948 struct e1000_hw *hw = &adapter->hw;
4949 struct pci_dev *pvfdev;
4950 unsigned int device_id;
4951 u16 thisvf_devfn;
4952
4953 random_ether_addr(mac_addr);
4954 igb_set_vf_mac(adapter, vf, mac_addr);
4955
4956 switch (adapter->hw.mac.type) {
4957 case e1000_82576:
4958 device_id = IGB_82576_VF_DEV_ID;
4959 /* VF Stride for 82576 is 2 */
4960 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
4961 (pdev->devfn & 1);
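		/* e.g. with the PF at devfn 0 this walks 0x80, 0x82,
		 * 0x84, ...: the 0x80 VF offset plus vf times the stride */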
4962 break;
4963 case e1000_i350:
4964 device_id = IGB_I350_VF_DEV_ID;
4965 /* VF Stride for I350 is 4 */
4966 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
4967 (pdev->devfn & 3);
4968 break;
4969 default:
4970 device_id = 0;
4971 thisvf_devfn = 0;
4972 break;
4973 }
4974
4975 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
4976 while (pvfdev) {
4977 if (pvfdev->devfn == thisvf_devfn)
4978 break;
4979 pvfdev = pci_get_device(hw->vendor_id,
4980 device_id, pvfdev);
4981 }
4982
4983 if (pvfdev)
4984 adapter->vf_data[vf].vfdev = pvfdev;
4985 else
4986 dev_err(&pdev->dev,
4987 "Couldn't find pci dev ptr for VF %4.4x\n",
4988 thisvf_devfn);
4989 return pvfdev != NULL;
4990}
4991
4992static int igb_find_enabled_vfs(struct igb_adapter *adapter)
4993{
4994 struct e1000_hw *hw = &adapter->hw;
4995 struct pci_dev *pdev = adapter->pdev;
4996 struct pci_dev *pvfdev;
4997 u16 vf_devfn = 0;
4998 u16 vf_stride;
4999 unsigned int device_id;
5000 int vfs_found = 0;
5001
5002 switch (adapter->hw.mac.type) {
5003 case e1000_82576:
5004 device_id = IGB_82576_VF_DEV_ID;
5005 /* VF Stride for 82576 is 2 */
5006 vf_stride = 2;
5007 break;
5008 case e1000_i350:
5009 device_id = IGB_I350_VF_DEV_ID;
5010 /* VF Stride for I350 is 4 */
5011 vf_stride = 4;
5012 break;
5013 default:
5014 device_id = 0;
5015 vf_stride = 0;
5016 break;
5017 }
5018
5019 vf_devfn = pdev->devfn + 0x80;
5020 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5021 while (pvfdev) {
Greg Rose06292922012-02-02 23:51:43 +00005022 if (pvfdev->devfn == vf_devfn &&
5023 (pvfdev->bus->number >= pdev->bus->number))
Greg Rose0224d662011-10-14 02:57:14 +00005024 vfs_found++;
5025 vf_devfn += vf_stride;
5026 pvfdev = pci_get_device(hw->vendor_id,
5027 device_id, pvfdev);
5028 }
5029
5030 return vfs_found;
5031}
5032
5033static int igb_check_vf_assignment(struct igb_adapter *adapter)
5034{
5035 int i;
5036 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5037 if (adapter->vf_data[i].vfdev) {
5038 if (adapter->vf_data[i].vfdev->dev_flags &
5039 PCI_DEV_FLAGS_ASSIGNED)
5040 return true;
5041 }
5042 }
5043 return false;
5044}
5045
5046#endif
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005047static void igb_ping_all_vfs(struct igb_adapter *adapter)
5048{
5049 struct e1000_hw *hw = &adapter->hw;
5050 u32 ping;
5051 int i;
5052
5053 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5054 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005055 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005056 ping |= E1000_VT_MSGTYPE_CTS;
5057 igb_write_mbx(hw, &ping, 1, i);
5058 }
5059}
5060
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005061static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5062{
5063 struct e1000_hw *hw = &adapter->hw;
5064 u32 vmolr = rd32(E1000_VMOLR(vf));
5065 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5066
Alexander Duyckd85b90042010-09-22 17:56:20 +00005067 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005068 IGB_VF_FLAG_MULTI_PROMISC);
5069 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5070
5071 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5072 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00005073 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005074 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5075 } else {
5076 /*
5077 * if we have hashes and we are clearing a multicast promisc
5078 * flag we need to write the hashes to the MTA as this step
5079 * was previously skipped
5080 */
5081 if (vf_data->num_vf_mc_hashes > 30) {
5082 vmolr |= E1000_VMOLR_MPME;
5083 } else if (vf_data->num_vf_mc_hashes) {
5084 int j;
5085 vmolr |= E1000_VMOLR_ROMPE;
5086 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5087 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5088 }
5089 }
5090
5091 wr32(E1000_VMOLR(vf), vmolr);
5092
5093	/* any flags still set were not processed and are likely unsupported */
5094 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5095 return -EINVAL;
5096
5097 return 0;
5099}
5100
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005101static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5102 u32 *msgbuf, u32 vf)
5103{
5104 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5105 u16 *hash_list = (u16 *)&msgbuf[1];
5106 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5107 int i;
5108
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005109 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005110	 * to this VF for later use to restore when the PF multicast
5111 * list changes
5112 */
5113 vf_data->num_vf_mc_hashes = n;
5114
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005115 /* only up to 30 hash values supported */
5116 if (n > 30)
5117 n = 30;
5118
5119 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005120 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005121 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005122
5123 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005124 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005125
5126 return 0;
5127}
5128
5129static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5130{
5131 struct e1000_hw *hw = &adapter->hw;
5132 struct vf_data_storage *vf_data;
5133 int i, j;
5134
5135 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005136 u32 vmolr = rd32(E1000_VMOLR(i));
5137 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5138
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005139 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005140
5141 if ((vf_data->num_vf_mc_hashes > 30) ||
5142 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5143 vmolr |= E1000_VMOLR_MPME;
5144 } else if (vf_data->num_vf_mc_hashes) {
5145 vmolr |= E1000_VMOLR_ROMPE;
5146 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5147 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5148 }
5149 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005150 }
5151}
5152
5153static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5154{
5155 struct e1000_hw *hw = &adapter->hw;
5156 u32 pool_mask, reg, vid;
5157 int i;
5158
5159 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5160
5161 /* Find the vlan filter for this id */
5162 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5163 reg = rd32(E1000_VLVF(i));
5164
5165 /* remove the vf from the pool */
5166 reg &= ~pool_mask;
5167
5168 /* if pool is empty then remove entry from vfta */
5169 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5170 (reg & E1000_VLVF_VLANID_ENABLE)) {
5171			vid = reg & E1000_VLVF_VLANID_MASK;
5172			reg = 0;
5173			igb_vfta_set(hw, vid, false);
5174 }
5175
5176 wr32(E1000_VLVF(i), reg);
5177 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005178
5179 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005180}
5181
5182static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5183{
5184 struct e1000_hw *hw = &adapter->hw;
5185 u32 reg, i;
5186
Alexander Duyck51466232009-10-27 23:47:35 +00005187 /* The vlvf table only exists on 82576 hardware and newer */
5188 if (hw->mac.type < e1000_82576)
5189 return -1;
5190
5191 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005192 if (!adapter->vfs_allocated_count)
5193 return -1;
5194
5195 /* Find the vlan filter for this id */
5196 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5197 reg = rd32(E1000_VLVF(i));
5198 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5199 vid == (reg & E1000_VLVF_VLANID_MASK))
5200 break;
5201 }
5202
5203 if (add) {
5204 if (i == E1000_VLVF_ARRAY_SIZE) {
5205 /* Did not find a matching VLAN ID entry that was
5206 * enabled. Search for a free filter entry, i.e.
5207 * one without the enable bit set
5208 */
5209 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5210 reg = rd32(E1000_VLVF(i));
5211 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5212 break;
5213 }
5214 }
5215 if (i < E1000_VLVF_ARRAY_SIZE) {
5216 /* Found an enabled/available entry */
5217 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5218
5219 /* if !enabled we need to set this up in vfta */
5220 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005221 /* add VID to filter table */
5222 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005223 reg |= E1000_VLVF_VLANID_ENABLE;
5224 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005225 reg &= ~E1000_VLVF_VLANID_MASK;
5226 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005227 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005228
5229 /* do not modify RLPML for PF devices */
5230 if (vf >= adapter->vfs_allocated_count)
5231 return 0;
5232
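			/* first VLAN for this VF: grow its max frame size
			 * (RLPML) by the 4 bytes of the VLAN tag */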
5233 if (!adapter->vf_data[vf].vlans_enabled) {
5234 u32 size;
5235 reg = rd32(E1000_VMOLR(vf));
5236 size = reg & E1000_VMOLR_RLPML_MASK;
5237 size += 4;
5238 reg &= ~E1000_VMOLR_RLPML_MASK;
5239 reg |= size;
5240 wr32(E1000_VMOLR(vf), reg);
5241 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005242
Alexander Duyck51466232009-10-27 23:47:35 +00005243 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005244 }
5245 } else {
5246 if (i < E1000_VLVF_ARRAY_SIZE) {
5247 /* remove vf from the pool */
5248 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5249 /* if pool is empty then remove entry from vfta */
5250 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5251 reg = 0;
5252 igb_vfta_set(hw, vid, false);
5253 }
5254 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005255
5256 /* do not modify RLPML for PF devices */
5257 if (vf >= adapter->vfs_allocated_count)
5258 return 0;
5259
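			/* once the last VLAN is removed, shrink the VF's
			 * max frame size (RLPML) back by the 4 tag bytes */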
5260 adapter->vf_data[vf].vlans_enabled--;
5261 if (!adapter->vf_data[vf].vlans_enabled) {
5262 u32 size;
5263 reg = rd32(E1000_VMOLR(vf));
5264 size = reg & E1000_VMOLR_RLPML_MASK;
5265 size -= 4;
5266 reg &= ~E1000_VMOLR_RLPML_MASK;
5267 reg |= size;
5268 wr32(E1000_VMOLR(vf), reg);
5269 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005270 }
5271 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005272 return 0;
5273}
5274
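/*
 * Program the VF's port VLAN via VMVIR: a non-zero vid is inserted as
 * the default VLAN tag on frames the VF transmits; zero disables it.
 */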
5275static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5276{
5277 struct e1000_hw *hw = &adapter->hw;
5278
5279 if (vid)
5280 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5281 else
5282 wr32(E1000_VMVIR(vf), 0);
5283}
5284
5285static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5286 int vf, u16 vlan, u8 qos)
5287{
5288 int err = 0;
5289 struct igb_adapter *adapter = netdev_priv(netdev);
5290
5291 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5292 return -EINVAL;
5293 if (vlan || qos) {
5294 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5295 if (err)
5296 goto out;
5297 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5298 igb_set_vmolr(adapter, vf, !vlan);
5299 adapter->vf_data[vf].pf_vlan = vlan;
5300 adapter->vf_data[vf].pf_qos = qos;
5301 dev_info(&adapter->pdev->dev,
5302 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5303 if (test_bit(__IGB_DOWN, &adapter->state)) {
5304 dev_warn(&adapter->pdev->dev,
5305 "The VF VLAN has been set,"
5306 " but the PF device is not up.\n");
5307 dev_warn(&adapter->pdev->dev,
5308 "Bring the PF device up before"
5309 " attempting to use the VF device.\n");
5310 }
5311 } else {
5312 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5313 false, vf);
5314 igb_set_vmvir(adapter, vlan, vf);
5315 igb_set_vmolr(adapter, vf, true);
5316 adapter->vf_data[vf].pf_vlan = 0;
5317 adapter->vf_data[vf].pf_qos = 0;
5318 }
5319out:
5320 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005321}
5322
5323static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5324{
5325 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5326 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5327
5328 return igb_vlvf_set(adapter, vid, add, vf);
5329}
5330
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005331static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005332{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005333 /* clear flags - except flag that indicates PF has set the MAC */
5334 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005335 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005336
5337 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005338 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005339
5340 /* reset vlans for device */
5341 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005342 if (adapter->vf_data[vf].pf_vlan)
5343 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5344 adapter->vf_data[vf].pf_vlan,
5345 adapter->vf_data[vf].pf_qos);
5346 else
5347 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005348
5349 /* reset multicast table array for vf */
5350 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5351
5352 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005353 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005354}
5355
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005356static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5357{
5358 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5359
5360 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005361 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5362 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005363
5364 /* process remaining reset events */
5365 igb_vf_reset(adapter, vf);
5366}
5367
5368static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005369{
5370 struct e1000_hw *hw = &adapter->hw;
5371 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005372 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005373 u32 reg, msgbuf[3];
5374 u8 *addr = (u8 *)(&msgbuf[1]);
5375
5376 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005377 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005378
5379 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005380 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005381
5382 /* enable transmit and receive for vf */
5383 reg = rd32(E1000_VFTE);
5384 wr32(E1000_VFTE, reg | (1 << vf));
5385 reg = rd32(E1000_VFRE);
5386 wr32(E1000_VFRE, reg | (1 << vf));
5387
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005388 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005389
5390 /* reply to reset with ack and vf mac address */
5391 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5392 memcpy(addr, vf_mac, 6);
5393 igb_write_mbx(hw, msgbuf, 3, vf);
5394}
5395
5396static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5397{
Greg Rosede42edd2010-07-01 13:39:23 +00005398 /*
5399 * The VF MAC Address is stored in a packed array of bytes
5400 * starting at the second 32 bit word of the msg array
5401 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005402 unsigned char *addr = (char *)&msg[1];
5403 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005404
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005405 if (is_valid_ether_addr(addr))
5406 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005407
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005408 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005409}
5410
5411static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5412{
5413 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005414 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005415 u32 msg = E1000_VT_MSGTYPE_NACK;
5416
5417 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005418 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5419 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005420 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005421 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005422 }
5423}
5424
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005425static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005426{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005427 struct pci_dev *pdev = adapter->pdev;
5428 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005429 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005430 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005431 s32 retval;
5432
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005433 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005434
Alexander Duyckfef45f42009-12-11 22:57:34 -08005435 if (retval) {
5436		/* if receive failed revoke VF CTS status and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005437 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005438 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5439 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5440 return;
5441 goto out;
5442 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005443
5444 /* this is a message we already processed, do nothing */
5445 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005446 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005447
5448 /*
5449 * until the vf completes a reset it should not be
5450 * allowed to start any configuration.
5451 */
5453 if (msgbuf[0] == E1000_VF_RESET) {
5454 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005455 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005456 }
5457
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005458 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005459 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5460 return;
5461 retval = -1;
5462 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005463 }
5464
5465 switch ((msgbuf[0] & 0xFFFF)) {
5466 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005467 retval = -EINVAL;
5468 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5469 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5470 else
5471 dev_warn(&pdev->dev,
5472 "VF %d attempted to override administratively "
5473 "set MAC address\nReload the VF driver to "
5474 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005475 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005476 case E1000_VF_SET_PROMISC:
5477 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5478 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005479 case E1000_VF_SET_MULTICAST:
5480 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5481 break;
5482 case E1000_VF_SET_LPE:
5483 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5484 break;
5485 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005486 retval = -1;
5487 if (vf_data->pf_vlan)
5488 dev_warn(&pdev->dev,
5489 "VF %d attempted to override administratively "
5490 "set VLAN tag\nReload the VF driver to "
5491 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005492 else
5493 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005494 break;
5495 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005496 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005497 retval = -1;
5498 break;
5499 }
5500
Alexander Duyckfef45f42009-12-11 22:57:34 -08005501 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5502out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005503 /* notify the VF of the results of what it sent us */
5504 if (retval)
5505 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5506 else
5507 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5508
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005509 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005510}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005511
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005512static void igb_msg_task(struct igb_adapter *adapter)
5513{
5514 struct e1000_hw *hw = &adapter->hw;
5515 u32 vf;
5516
5517 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5518 /* process any reset requests */
5519 if (!igb_check_for_rst(hw, vf))
5520 igb_vf_reset_event(adapter, vf);
5521
5522 /* process any messages pending */
5523 if (!igb_check_for_msg(hw, vf))
5524 igb_rcv_msg_from_vf(adapter, vf);
5525
5526 /* process any acks */
5527 if (!igb_check_for_ack(hw, vf))
5528 igb_rcv_ack_from_vf(adapter, vf);
5529 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005530}
5531
Auke Kok9d5c8242008-01-24 02:22:38 -08005532/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005533 * igb_set_uta - Set unicast filter table address
5534 * @adapter: board private structure
5535 *
5536 * The unicast table address is a register array of 32-bit registers.
5537 * The table is meant to be used in a way similar to the MTA; however,
5538 * due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005539 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5540 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005541 **/
5542static void igb_set_uta(struct igb_adapter *adapter)
5543{
5544 struct e1000_hw *hw = &adapter->hw;
5545 int i;
5546
5547 /* The UTA table only exists on 82576 hardware and newer */
5548 if (hw->mac.type < e1000_82576)
5549 return;
5550
5551 /* we only need to do this if VMDq is enabled */
5552 if (!adapter->vfs_allocated_count)
5553 return;
5554
5555 for (i = 0; i < hw->mac.uta_reg_count; i++)
5556 array_wr32(E1000_UTA, i, ~0);
5557}
5558
5559/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005560 * igb_intr_msi - Interrupt Handler
5561 * @irq: interrupt number
5562 * @data: pointer to a network interface device structure
5563 **/
5564static irqreturn_t igb_intr_msi(int irq, void *data)
5565{
Alexander Duyck047e0032009-10-27 15:49:27 +00005566 struct igb_adapter *adapter = data;
5567 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005568 struct e1000_hw *hw = &adapter->hw;
5569 /* read ICR disables interrupts using IAM */
5570 u32 icr = rd32(E1000_ICR);
5571
Alexander Duyck047e0032009-10-27 15:49:27 +00005572 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005573
Alexander Duyck7f081d42010-01-07 17:41:00 +00005574 if (icr & E1000_ICR_DRSTA)
5575 schedule_work(&adapter->reset_task);
5576
Alexander Duyck047e0032009-10-27 15:49:27 +00005577 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005578 /* HW is reporting DMA is out of sync */
5579 adapter->stats.doosync++;
5580 }
5581
Auke Kok9d5c8242008-01-24 02:22:38 -08005582 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5583 hw->mac.get_link_status = 1;
5584 if (!test_bit(__IGB_DOWN, &adapter->state))
5585 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5586 }
5587
Alexander Duyck047e0032009-10-27 15:49:27 +00005588 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005589
5590 return IRQ_HANDLED;
5591}
5592
5593/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005594 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005595 * @irq: interrupt number
5596 * @data: pointer to a network interface device structure
5597 **/
5598static irqreturn_t igb_intr(int irq, void *data)
5599{
Alexander Duyck047e0032009-10-27 15:49:27 +00005600 struct igb_adapter *adapter = data;
5601 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005602 struct e1000_hw *hw = &adapter->hw;
5603 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5604 * need for the IMC write */
5605 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005606
5607 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5608 * not set, then the adapter didn't send an interrupt */
5609 if (!(icr & E1000_ICR_INT_ASSERTED))
5610 return IRQ_NONE;
5611
Alexander Duyck0ba82992011-08-26 07:45:47 +00005612 igb_write_itr(q_vector);
5613
Alexander Duyck7f081d42010-01-07 17:41:00 +00005614 if (icr & E1000_ICR_DRSTA)
5615 schedule_work(&adapter->reset_task);
5616
Alexander Duyck047e0032009-10-27 15:49:27 +00005617 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005618 /* HW is reporting DMA is out of sync */
5619 adapter->stats.doosync++;
5620 }
5621
Auke Kok9d5c8242008-01-24 02:22:38 -08005622 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5623 hw->mac.get_link_status = 1;
5624 /* guard against interrupt when we're going down */
5625 if (!test_bit(__IGB_DOWN, &adapter->state))
5626 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5627 }
5628
Alexander Duyck047e0032009-10-27 15:49:27 +00005629 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005630
5631 return IRQ_HANDLED;
5632}
5633
Stephen Hemmingerc50b52a2012-01-18 22:13:26 +00005634static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005635{
Alexander Duyck047e0032009-10-27 15:49:27 +00005636 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005637 struct e1000_hw *hw = &adapter->hw;
5638
Alexander Duyck0ba82992011-08-26 07:45:47 +00005639 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5640 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5641 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5642 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005643 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005644 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005645 }
5646
5647 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5648 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005649 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005650 else
5651 igb_irq_enable(adapter);
5652 }
5653}
5654
Auke Kok9d5c8242008-01-24 02:22:38 -08005655/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005656 * igb_poll - NAPI Rx polling callback
5657 * @napi: napi polling structure
5658 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005659 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005660static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005661{
Alexander Duyck047e0032009-10-27 15:49:27 +00005662 struct igb_q_vector *q_vector = container_of(napi,
5663 struct igb_q_vector,
5664 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005665 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005666
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005667#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005668 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5669 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005670#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005671 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005672 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005673
Alexander Duyck0ba82992011-08-26 07:45:47 +00005674 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005675 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005676
Alexander Duyck16eb8812011-08-26 07:43:54 +00005677 /* If all work not completed, return budget and keep polling */
5678 if (!clean_complete)
5679 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005680
Alexander Duyck46544252009-02-19 20:39:04 -08005681	/* all work completed; exit polling mode and re-enable interrupts */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005682 napi_complete(napi);
5683 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005684
Alexander Duyck16eb8812011-08-26 07:43:54 +00005685 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005686}
Al Viro6d8126f2008-03-16 22:23:24 +00005687
Auke Kok9d5c8242008-01-24 02:22:38 -08005688/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005689 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005690 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005691 * @shhwtstamps: timestamp structure to update
5692 * @regval: unsigned 64bit system time value.
5693 *
5694 * We need to convert the system time value stored in the RX/TXSTMP registers
5695 * into a hwtstamp which can be used by the upper level timestamping functions
5696 */
5697static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5698 struct skb_shared_hwtstamps *shhwtstamps,
5699 u64 regval)
5700{
5701 u64 ns;
5702
Alexander Duyck55cac242009-11-19 12:42:21 +00005703 /*
5704	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL; shift this up by
5705	 * 24 bits to match the clock shift we set up earlier.
5706 */
Alexander Duyck06218a82011-08-26 07:46:55 +00005707 if (adapter->hw.mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00005708 regval <<= IGB_82580_TSYNC_SHIFT;
5709
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005710 ns = timecounter_cyc2time(&adapter->clock, regval);
5711 timecompare_update(&adapter->compare, ns);
5712 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5713 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5714 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5715}
5716
5717/**
5718 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5719 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck06034642011-08-26 07:44:22 +00005720 * @buffer_info: pointer to igb_tx_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005721 *
5722 * If we were asked to do hardware stamping and such a time stamp is
5723 * available, then it must have been for this skb here because we
5724 * allow only one such packet into the queue.
5725 */
Alexander Duyck06034642011-08-26 07:44:22 +00005726static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5727 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005728{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005729 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005730 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005731 struct skb_shared_hwtstamps shhwtstamps;
5732 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005733
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005734 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005735 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005736 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5737 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005738
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005739 regval = rd32(E1000_TXSTMPL);
5740 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5741
5742 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005743 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005744}
5745
5746/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005747 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005748 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005749 * returns true if ring is completely cleaned
5750 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005751static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005752{
Alexander Duyck047e0032009-10-27 15:49:27 +00005753 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005754 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005755 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005756 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005757 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005758 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005759 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005760
Alexander Duyck13fde972011-10-05 13:35:24 +00005761 if (test_bit(__IGB_DOWN, &adapter->state))
5762 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005763
Alexander Duyck06034642011-08-26 07:44:22 +00005764 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005765 tx_desc = IGB_TX_DESC(tx_ring, i);
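	/* bias the index by -count so the ring-wrap test in the loop below
	 * is a cheap !i check; the bias is removed again after the loop */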
Alexander Duyck8542db02011-08-26 07:44:43 +00005766 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005767
Alexander Duyck13fde972011-10-05 13:35:24 +00005768 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005769 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005770
Alexander Duyck8542db02011-08-26 07:44:43 +00005771 /* prevent any other reads prior to eop_desc */
5772 rmb();
5773
5774 /* if next_to_watch is not set then there is no work pending */
5775 if (!eop_desc)
5776 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005777
5778 /* if DD is not set pending work has not been completed */
5779 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5780 break;
5781
Alexander Duyck8542db02011-08-26 07:44:43 +00005782 /* clear next_to_watch to prevent false hangs */
5783 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005784
Alexander Duyckebe42d12011-08-26 07:45:09 +00005785 /* update the statistics for this packet */
5786 total_bytes += tx_buffer->bytecount;
5787 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005788
Alexander Duyckebe42d12011-08-26 07:45:09 +00005789 /* retrieve hardware timestamp */
5790 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005791
Alexander Duyckebe42d12011-08-26 07:45:09 +00005792 /* free the skb */
5793 dev_kfree_skb_any(tx_buffer->skb);
5794 tx_buffer->skb = NULL;
5795
5796 /* unmap skb header data */
5797 dma_unmap_single(tx_ring->dev,
5798 tx_buffer->dma,
5799 tx_buffer->length,
5800 DMA_TO_DEVICE);
5801
5802 /* clear last DMA location and unmap remaining buffers */
5803 while (tx_desc != eop_desc) {
5804 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005805
Alexander Duyck13fde972011-10-05 13:35:24 +00005806 tx_buffer++;
5807 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005808 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005809 if (unlikely(!i)) {
5810 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005811 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005812 tx_desc = IGB_TX_DESC(tx_ring, 0);
5813 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005814
5815 /* unmap any remaining paged data */
5816 if (tx_buffer->dma) {
5817 dma_unmap_page(tx_ring->dev,
5818 tx_buffer->dma,
5819 tx_buffer->length,
5820 DMA_TO_DEVICE);
5821 }
5822 }
5823
5824 /* clear last DMA location */
5825 tx_buffer->dma = 0;
5826
5827 /* move us one more past the eop_desc for start of next pkt */
5828 tx_buffer++;
5829 tx_desc++;
5830 i++;
5831 if (unlikely(!i)) {
5832 i -= tx_ring->count;
5833 tx_buffer = tx_ring->tx_buffer_info;
5834 tx_desc = IGB_TX_DESC(tx_ring, 0);
5835 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005836 }
5837
Eric Dumazetbdbc0632012-01-04 20:23:36 +00005838 netdev_tx_completed_queue(txring_txq(tx_ring),
5839 total_packets, total_bytes);
Alexander Duyck8542db02011-08-26 07:44:43 +00005840 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005841 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005842 u64_stats_update_begin(&tx_ring->tx_syncp);
5843 tx_ring->tx_stats.bytes += total_bytes;
5844 tx_ring->tx_stats.packets += total_packets;
5845 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005846 q_vector->tx.total_bytes += total_bytes;
5847 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005848
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005849 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005850 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005851
Alexander Duyck8542db02011-08-26 07:44:43 +00005852 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005853
Auke Kok9d5c8242008-01-24 02:22:38 -08005854		/* Detect a transmit hang in hardware; this serializes the
5855 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005856 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyck8542db02011-08-26 07:44:43 +00005857 if (eop_desc &&
5858 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005859 (adapter->tx_timeout_factor * HZ)) &&
5860 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005861
Auke Kok9d5c8242008-01-24 02:22:38 -08005862 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005863 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005864 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005865 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005866 " TDH <%x>\n"
5867 " TDT <%x>\n"
5868 " next_to_use <%x>\n"
5869 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005870 "buffer_info[next_to_clean]\n"
5871 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005872 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005873 " jiffies <%lx>\n"
5874 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005875 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005876 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005877 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005878 tx_ring->next_to_use,
5879 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005880 tx_buffer->time_stamp,
5881 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005882 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005883 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005884 netif_stop_subqueue(tx_ring->netdev,
5885 tx_ring->queue_index);
5886
5887 /* we are about to reset, no point in enabling stuff */
5888 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005889 }
5890 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005891
5892 if (unlikely(total_packets &&
5893 netif_carrier_ok(tx_ring->netdev) &&
5894 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5895 /* Make sure that anybody stopping the queue after this
5896 * sees the new next_to_clean.
5897 */
5898 smp_mb();
5899 if (__netif_subqueue_stopped(tx_ring->netdev,
5900 tx_ring->queue_index) &&
5901 !(test_bit(__IGB_DOWN, &adapter->state))) {
5902 netif_wake_subqueue(tx_ring->netdev,
5903 tx_ring->queue_index);
5904
5905 u64_stats_update_begin(&tx_ring->tx_syncp);
5906 tx_ring->tx_stats.restart_queue++;
5907 u64_stats_update_end(&tx_ring->tx_syncp);
5908 }
5909 }
5910
5911 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005912}
5913
Alexander Duyckcd392f52011-08-26 07:43:59 +00005914static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005915 union e1000_adv_rx_desc *rx_desc,
5916 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005917{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005918 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005919
Alexander Duyck294e7d72011-08-26 07:45:57 +00005920 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005921 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00005922 return;
5923
5924 /* Rx checksum disabled via ethtool */
5925 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005926 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005927
Auke Kok9d5c8242008-01-24 02:22:38 -08005928 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005929 if (igb_test_staterr(rx_desc,
5930 E1000_RXDEXT_STATERR_TCPE |
5931 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005932 /*
5933 * work around errata with sctp packets where the TCPE aka
5934 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5935 * packets, (aka let the stack check the crc32c)
5936 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005937 if (!((skb->len == 60) &&
5938 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005939 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005940 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005941 u64_stats_update_end(&ring->rx_syncp);
5942 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005943 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005944 return;
5945 }
5946 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005947 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5948 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08005949 skb->ip_summed = CHECKSUM_UNNECESSARY;
5950
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005951 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5952 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08005953}
5954
Alexander Duyck077887c2011-08-26 07:46:29 +00005955static inline void igb_rx_hash(struct igb_ring *ring,
5956 union e1000_adv_rx_desc *rx_desc,
5957 struct sk_buff *skb)
5958{
5959 if (ring->netdev->features & NETIF_F_RXHASH)
5960 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5961}
5962
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005963static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5964 union e1000_adv_rx_desc *rx_desc,
5965 struct sk_buff *skb)
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005966{
5967 struct igb_adapter *adapter = q_vector->adapter;
5968 struct e1000_hw *hw = &adapter->hw;
5969 u64 regval;
5970
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005971 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
5972 E1000_RXDADV_STAT_TS))
5973 return;
5974
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005975 /*
5976 * If this bit is set, then the RX registers contain the time stamp. No
5977 * other packet will be time stamped until we read these registers, so
5978 * read the registers to make them available again. Because only one
5979 * packet can be time stamped at a time, we know that the register
5980 * values must belong to this one here and therefore we don't need to
5981 * compare any of the additional attributes stored for it.
5982 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005983 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005984 * can turn into a skb_shared_hwtstamps.
5985 */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005986 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
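		/* the timestamp was prepended to the packet data: dwords 2
		 * and 3 of the inserted header hold the raw SYSTIM value */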
Nick Nunley757b77e2010-03-26 11:36:47 +00005987 u32 *stamp = (u32 *)skb->data;
5988 regval = le32_to_cpu(*(stamp + 2));
5989 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5990 skb_pull(skb, IGB_TS_HDR_LEN);
5991 } else {
5992		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5993 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005994
Nick Nunley757b77e2010-03-26 11:36:47 +00005995 regval = rd32(E1000_RXSTMPL);
5996 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5997 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005998
5999 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
6000}
Alexander Duyck8be10e92011-08-26 07:47:11 +00006001
6002static void igb_rx_vlan(struct igb_ring *ring,
6003 union e1000_adv_rx_desc *rx_desc,
6004 struct sk_buff *skb)
6005{
6006 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6007 u16 vid;
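		/* VLAN tags on loopbacked (VM-to-VM) frames arrive in
		 * network byte order on MACs with the BSWAP ring flag set,
		 * so they must be swapped before the tag reaches the stack */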
6008 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6009 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
6010 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6011 else
6012 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6013
6014 __vlan_hwaccel_put_tag(skb, vid);
6015 }
6016}
6017
Alexander Duyck44390ca2011-08-26 07:43:38 +00006018static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006019{
6020 /* HW will not DMA in data larger than the given buffer, even if it
6021 * parses the (NFS, of course) header to be larger. In that case, it
6022 * fills the header buffer and spills the rest into the page.
6023 */
6024 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
6025 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00006026 if (hlen > IGB_RX_HDR_LEN)
6027 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00006028 return hlen;
6029}
6030
Alexander Duyckcd392f52011-08-26 07:43:59 +00006031static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08006032{
Alexander Duyck0ba82992011-08-26 07:45:47 +00006033 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006034 union e1000_adv_rx_desc *rx_desc;
6035 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08006036 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006037 u16 cleaned_count = igb_desc_unused(rx_ring);
6038 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08006039
Alexander Duyck601369062011-08-26 07:44:05 +00006040 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08006041
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006042 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006043 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00006044 struct sk_buff *skb = buffer_info->skb;
6045 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006046
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006047 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006048 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006049
6050 i++;
6051 if (i == rx_ring->count)
6052 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00006053
Alexander Duyck601369062011-08-26 07:44:05 +00006054 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006055 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006056
Alexander Duyck16eb8812011-08-26 07:43:54 +00006057 /*
6058 * This memory barrier is needed to keep us from reading
6059 * any other fields out of the rx_desc until we know the
6060 * RXD_STAT_DD bit is set
6061 */
6062 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006063
Alexander Duyck16eb8812011-08-26 07:43:54 +00006064 if (!skb_is_nonlinear(skb)) {
6065 __skb_put(skb, igb_get_hlen(rx_desc));
6066 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00006067 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00006068 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00006069 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006070 }
6071
Alexander Duyck16eb8812011-08-26 07:43:54 +00006072 if (rx_desc->wb.upper.length) {
6073 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006074
Koki Sanagiaa913402010-04-27 01:01:19 +00006075 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006076 buffer_info->page,
6077 buffer_info->page_offset,
6078 length);
6079
Alexander Duyck16eb8812011-08-26 07:43:54 +00006080 skb->len += length;
6081 skb->data_len += length;
Eric Dumazet95b9c1d2011-10-13 07:56:41 +00006082 skb->truesize += PAGE_SIZE / 2;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006083
Alexander Duyckd1eff352009-11-12 18:38:35 +00006084 if ((page_count(buffer_info->page) != 1) ||
6085 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006086 buffer_info->page = NULL;
6087 else
6088 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08006089
Alexander Duyck16eb8812011-08-26 07:43:54 +00006090 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
6091 PAGE_SIZE / 2, DMA_FROM_DEVICE);
6092 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006093 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006094
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006095 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006096 struct igb_rx_buffer *next_buffer;
6097 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08006098 buffer_info->skb = next_buffer->skb;
6099 buffer_info->dma = next_buffer->dma;
6100 next_buffer->skb = skb;
6101 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006102 goto next_desc;
6103 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00006104
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006105 if (igb_test_staterr(rx_desc,
6106 E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00006107 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006108 goto next_desc;
6109 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006110
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006111 igb_rx_hwtstamp(q_vector, rx_desc, skb);
Alexander Duyck077887c2011-08-26 07:46:29 +00006112 igb_rx_hash(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006113 igb_rx_checksum(rx_ring, rx_desc, skb);
Alexander Duyck8be10e92011-08-26 07:47:11 +00006114 igb_rx_vlan(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006115
6116 total_bytes += skb->len;
6117 total_packets++;
6118
6119 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6120
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006121 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006122
Alexander Duyck16eb8812011-08-26 07:43:54 +00006123 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08006124next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00006125 if (!budget)
6126 break;
6127
6128 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006129 /* return some buffers to hardware, one at a time is too slow */
6130 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00006131 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006132 cleaned_count = 0;
6133 }
6134
6135 /* use prefetched values */
6136 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006137 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006138
Auke Kok9d5c8242008-01-24 02:22:38 -08006139 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006140 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006141 rx_ring->rx_stats.packets += total_packets;
6142 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006143 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006144 q_vector->rx.total_packets += total_packets;
6145 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006146
6147 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006148 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006149
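	/* a non-zero remaining budget means the ring was fully drained */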
Alexander Duyck16eb8812011-08-26 07:43:54 +00006150 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006151}
6152
Alexander Duyckc023cd82011-08-26 07:43:43 +00006153static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006154 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006155{
6156 struct sk_buff *skb = bi->skb;
6157 dma_addr_t dma = bi->dma;
6158
6159 if (dma)
6160 return true;
6161
6162 if (likely(!skb)) {
6163 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6164 IGB_RX_HDR_LEN);
6165 bi->skb = skb;
6166 if (!skb) {
6167 rx_ring->rx_stats.alloc_failed++;
6168 return false;
6169 }
6170
6171 /* initialize skb for ring */
6172 skb_record_rx_queue(skb, rx_ring->queue_index);
6173 }
6174
6175 dma = dma_map_single(rx_ring->dev, skb->data,
6176 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6177
6178 if (dma_mapping_error(rx_ring->dev, dma)) {
6179 rx_ring->rx_stats.alloc_failed++;
6180 return false;
6181 }
6182
6183 bi->dma = dma;
6184 return true;
6185}
6186
6187static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006188 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006189{
6190 struct page *page = bi->page;
6191 dma_addr_t page_dma = bi->page_dma;
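	/* flip to the other half of the page; the half most recently
	 * handed to the stack may still be referenced by an skb */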
6192 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6193
6194 if (page_dma)
6195 return true;
6196
6197 if (!page) {
Eric Dumazet1f2149c2011-11-22 10:57:41 +00006198 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006199 bi->page = page;
6200 if (unlikely(!page)) {
6201 rx_ring->rx_stats.alloc_failed++;
6202 return false;
6203 }
6204 }
6205
6206 page_dma = dma_map_page(rx_ring->dev, page,
6207 page_offset, PAGE_SIZE / 2,
6208 DMA_FROM_DEVICE);
6209
6210 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6211 rx_ring->rx_stats.alloc_failed++;
6212 return false;
6213 }
6214
6215 bi->page_dma = page_dma;
6216 bi->page_offset = page_offset;
6217 return true;
6218}
6219
Auke Kok9d5c8242008-01-24 02:22:38 -08006220/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006221 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08006222 * @rx_ring: address of the ring to refill
 * @cleaned_count: number of buffers to replace
6223 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006224void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006225{
Auke Kok9d5c8242008-01-24 02:22:38 -08006226 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006227 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006228 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006229
Alexander Duyck601369062011-08-26 07:44:05 +00006230 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006231 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006232 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006233
6234 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006235 if (!igb_alloc_mapped_skb(rx_ring, bi))
6236 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006237
Alexander Duyckc023cd82011-08-26 07:43:43 +00006238 /* Refresh the desc even if buffer_addrs didn't change
6239 * because each write-back erases this info. */
6240 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006241
Alexander Duyckc023cd82011-08-26 07:43:43 +00006242 if (!igb_alloc_mapped_page(rx_ring, bi))
6243 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006244
Alexander Duyckc023cd82011-08-26 07:43:43 +00006245 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006246
Alexander Duyckc023cd82011-08-26 07:43:43 +00006247 rx_desc++;
6248 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006249 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006250 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006251 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006252 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006253 i -= rx_ring->count;
6254 }
6255
6256 /* clear the hdr_addr for the next_to_use descriptor */
6257 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006258 }
6259
Alexander Duyckc023cd82011-08-26 07:43:43 +00006260 i += rx_ring->count;
6261
Auke Kok9d5c8242008-01-24 02:22:38 -08006262 if (rx_ring->next_to_use != i) {
6263 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006264
6265 /* Force memory writes to complete before letting h/w
6266 * know there are new descriptors to fetch. (Only
6267 * applicable for weak-ordered memory model archs,
6268 * such as IA-64). */
6269 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006270 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006271 }
6272}
6273
6274/**
6275 * igb_mii_ioctl - read PHY registers via MII ioctls
6276 * @netdev: network interface device structure
6277 * @ifr: pointer to the MII ioctl request
6278 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
6279 **/
6280static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6281{
6282 struct igb_adapter *adapter = netdev_priv(netdev);
6283 struct mii_ioctl_data *data = if_mii(ifr);
6284
6285 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6286 return -EOPNOTSUPP;
6287
6288 switch (cmd) {
6289 case SIOCGMIIPHY:
6290 data->phy_id = adapter->hw.phy.addr;
6291 break;
6292 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006293 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6294 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006295 return -EIO;
6296 break;
6297 case SIOCSMIIREG:
6298 default:
6299 return -EOPNOTSUPP;
6300 }
6301 return 0;
6302}
6303
6304/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006305 * igb_hwtstamp_ioctl - control hardware time stamping
6306 * @netdev: network interface device structure
6307 * @ifr: pointer to the hwtstamp_config request
6308 * @cmd: ioctl command
6309 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006310 * Outgoing time stamping can be enabled and disabled. Play nice and
6311 * disable it when requested, although it shouldn't case any overhead
6312 * when no packet needs it. At most one packet in the queue may be
6313 * marked for time stamping, otherwise it would be impossible to tell
6314 * for sure to which packet the hardware time stamp belongs.
6315 *
6316 * Incoming time stamping has to be configured via the hardware
6317 * filters. Not all combinations are supported, in particular event
6318 * type has to be specified. Matching the kind of event packet is
6319 * not supported, with the exception of "all V2 events regardless of
6320 * level 2 or 4".
6321 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006322 **/
6323static int igb_hwtstamp_ioctl(struct net_device *netdev,
6324 struct ifreq *ifr, int cmd)
6325{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006326 struct igb_adapter *adapter = netdev_priv(netdev);
6327 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006328 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006329 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6330 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006331 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006332 bool is_l4 = false;
6333 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006334 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006335
6336 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6337 return -EFAULT;
6338
6339 /* reserved for future extensions */
6340 if (config.flags)
6341 return -EINVAL;
6342
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006343 switch (config.tx_type) {
6344 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006345 tsync_tx_ctl = 0;
 /* fall through */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006346 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006347 break;
6348 default:
6349 return -ERANGE;
6350 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006351
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006352 switch (config.rx_filter) {
6353 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006354 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006355 break;
6356 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6357 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6358 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6359 case HWTSTAMP_FILTER_ALL:
 6360 /*
 6361 * the TSYNCRXCFG register can select only one message type,
 6362 * so it is not possible to time stamp both Sync and Delay_Req
 6363 * messages => fall back to time stamping all packets
 6364 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006365 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006366 config.rx_filter = HWTSTAMP_FILTER_ALL;
6367 break;
6368 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006369 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006370 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006371 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006372 break;
6373 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006374 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006375 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006376 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006377 break;
6378 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6379 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006380 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006381 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006382 is_l2 = true;
6383 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006384 config.rx_filter = HWTSTAMP_FILTER_SOME;
6385 break;
6386 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6387 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006388 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006389 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006390 is_l2 = true;
6391 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006392 config.rx_filter = HWTSTAMP_FILTER_SOME;
6393 break;
6394 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6395 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6396 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006397 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006398 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006399 is_l2 = true;
Jacob Keller11ba69e2011-10-12 00:51:54 +00006400 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006401 break;
6402 default:
6403 return -ERANGE;
6404 }
6405
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006406 if (hw->mac.type == e1000_82575) {
 6407 if (tsync_rx_ctl || tsync_tx_ctl)
6408 return -EINVAL;
6409 return 0;
6410 }
6411
Nick Nunley757b77e2010-03-26 11:36:47 +00006412 /*
6413 * Per-packet timestamping only works if all packets are
6414 * timestamped, so enable timestamping in all packets as
6415 * long as one rx filter was configured.
6416 */
Alexander Duyck06218a82011-08-26 07:46:55 +00006417 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
Nick Nunley757b77e2010-03-26 11:36:47 +00006418 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6419 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6420 }
6421
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006422 /* enable/disable TX */
6423 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006424 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6425 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006426 wr32(E1000_TSYNCTXCTL, regval);
6427
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006428 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006429 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006430 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6431 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006432 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006433
6434 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006435 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6436
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006437 /* define ethertype filter for timestamped packets */
6438 if (is_l2)
6439 wr32(E1000_ETQF(3),
6440 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6441 E1000_ETQF_1588 | /* enable timestamping */
6442 ETH_P_1588)); /* 1588 eth protocol type */
6443 else
6444 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006445
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006446#define PTP_PORT 319 /* UDP port for PTP event messages */
6447 /* L4 Queue Filter[3]: filter by destination port and protocol */
6448 if (is_l4) {
6449 u32 ftqf = (IPPROTO_UDP /* UDP */
6450 | E1000_FTQF_VF_BP /* VF not compared */
6451 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6452 | E1000_FTQF_MASK); /* mask all inputs */
6453 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006454
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006455 wr32(E1000_IMIR(3), htons(PTP_PORT));
6456 wr32(E1000_IMIREXT(3),
6457 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6458 if (hw->mac.type == e1000_82576) {
6459 /* enable source port check */
6460 wr32(E1000_SPQF(3), htons(PTP_PORT));
6461 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6462 }
6463 wr32(E1000_FTQF(3), ftqf);
6464 } else {
6465 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6466 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006467 wrfl();
6468
6469 adapter->hwtstamp_config = config;
6470
6471 /* clear TX/RX time stamp registers, just to be sure */
6472 regval = rd32(E1000_TXSTMPH);
6473 regval = rd32(E1000_RXSTMPH);
6474
6475 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6476 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006477}
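
/*
 * Userspace sketch (editorial, not driver code) of the SIOCSHWTSTAMP
 * path handled above; "eth0" and the socket fd are assumptions for
 * illustration:
 *
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter reports what the hardware actually enabled,
 * which may be broader than requested (e.g. HWTSTAMP_FILTER_ALL on
 * 82580 and later, as implemented above).
 */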
6478
6479/**
Auke Kok9d5c8242008-01-24 02:22:38 -08006480 * igb_ioctl - dispatch device-specific ioctls
 6481 * @netdev: network interface device structure
 6482 * @ifr: interface request structure
 6483 * @cmd: ioctl command
6484 **/
6485static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6486{
6487 switch (cmd) {
6488 case SIOCGMIIPHY:
6489 case SIOCGMIIREG:
6490 case SIOCSMIIREG:
6491 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006492 case SIOCSHWTSTAMP:
6493 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006494 default:
6495 return -EOPNOTSUPP;
6496 }
6497}
6498
Alexander Duyck009bc062009-07-23 18:08:35 +00006499s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6500{
6501 struct igb_adapter *adapter = hw->back;
6502 u16 cap_offset;
6503
Jon Masonbdaae042011-06-27 07:44:01 +00006504 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006505 if (!cap_offset)
6506 return -E1000_ERR_CONFIG;
6507
6508 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6509
6510 return 0;
6511}
6512
6513s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6514{
6515 struct igb_adapter *adapter = hw->back;
6516 u16 cap_offset;
6517
Jon Masonbdaae042011-06-27 07:44:01 +00006518 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006519 if (!cap_offset)
6520 return -E1000_ERR_CONFIG;
6521
6522 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6523
6524 return 0;
6525}
6526
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006527static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006528{
6529 struct igb_adapter *adapter = netdev_priv(netdev);
6530 struct e1000_hw *hw = &adapter->hw;
6531 u32 ctrl, rctl;
Alexander Duyck5faf0302011-08-26 07:46:08 +00006532 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08006533
Alexander Duyck5faf0302011-08-26 07:46:08 +00006534 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006535 /* enable VLAN tag insert/strip */
6536 ctrl = rd32(E1000_CTRL);
6537 ctrl |= E1000_CTRL_VME;
6538 wr32(E1000_CTRL, ctrl);
6539
Alexander Duyck51466232009-10-27 23:47:35 +00006540 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006541 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006542 rctl &= ~E1000_RCTL_CFIEN;
6543 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006544 } else {
6545 /* disable VLAN tag insert/strip */
6546 ctrl = rd32(E1000_CTRL);
6547 ctrl &= ~E1000_CTRL_VME;
6548 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006549 }
6550
Alexander Duycke1739522009-02-19 20:39:44 -08006551 igb_rlpml_set(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006552}
6553
Jiri Pirko8e586132011-12-08 19:52:37 -05006554static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006555{
6556 struct igb_adapter *adapter = netdev_priv(netdev);
6557 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006558 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006559
Alexander Duyck51466232009-10-27 23:47:35 +00006560 /* attempt to add filter to vlvf array */
6561 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006562
Alexander Duyck51466232009-10-27 23:47:35 +00006563 /* add the filter since PF can receive vlans w/o entry in vlvf */
6564 igb_vfta_set(hw, vid, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006565
6566 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006567
6568 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006569}
6570
Jiri Pirko8e586132011-12-08 19:52:37 -05006571static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Auke Kok9d5c8242008-01-24 02:22:38 -08006572{
6573 struct igb_adapter *adapter = netdev_priv(netdev);
6574 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006575 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006576 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006577
Alexander Duyck51466232009-10-27 23:47:35 +00006578 /* remove vlan from VLVF table array */
6579 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006580
Alexander Duyck51466232009-10-27 23:47:35 +00006581 /* if vid was not present in VLVF just remove it from table */
6582 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006583 igb_vfta_set(hw, vid, false);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006584
6585 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05006586
6587 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006588}
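
/*
 * Editorial note on the two tables used above: VFTA is the global VLAN
 * filter table with one bit per VLAN ID, while VLVF maps a VLAN ID to
 * the pools (PF and VFs) that may receive it when virtualization is in
 * use.  On add, the VFTA bit is set unconditionally because the PF can
 * receive VLANs even when no VLVF slot is available; on kill, the VFTA
 * bit is cleared directly only if the VID was not tracked in VLVF.
 */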
6589
6590static void igb_restore_vlan(struct igb_adapter *adapter)
6591{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006592 u16 vid;
Auke Kok9d5c8242008-01-24 02:22:38 -08006593
Alexander Duyck5faf0302011-08-26 07:46:08 +00006594 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6595
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006596 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6597 igb_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08006598}
6599
David Decotigny14ad2512011-04-27 18:32:43 +00006600int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08006601{
Alexander Duyck090b1792009-10-27 23:51:55 +00006602 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08006603 struct e1000_mac_info *mac = &adapter->hw.mac;
6604
6605 mac->autoneg = 0;
6606
David Decotigny14ad2512011-04-27 18:32:43 +00006607 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6608 * for the switch() below to work */
6609 if ((spd & 1) || (dplx & ~1))
6610 goto err_inval;
6611
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006612 /* Fiber NICs only allow 1000 Mbps full duplex */
 6613 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
David Decotigny14ad2512011-04-27 18:32:43 +00006614 (spd != SPEED_1000 ||
 6615 dplx != DUPLEX_FULL))
6616 goto err_inval;
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006617
David Decotigny14ad2512011-04-27 18:32:43 +00006618 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006619 case SPEED_10 + DUPLEX_HALF:
6620 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6621 break;
6622 case SPEED_10 + DUPLEX_FULL:
6623 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6624 break;
6625 case SPEED_100 + DUPLEX_HALF:
6626 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6627 break;
6628 case SPEED_100 + DUPLEX_FULL:
6629 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6630 break;
6631 case SPEED_1000 + DUPLEX_FULL:
6632 mac->autoneg = 1;
6633 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6634 break;
6635 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6636 default:
David Decotigny14ad2512011-04-27 18:32:43 +00006637 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08006638 }
6639 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00006640
6641err_inval:
6642 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6643 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08006644}
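
/*
 * Editorial note on the switch above: SPEED_10/100/1000 are the literal
 * values 10/100/1000 and DUPLEX_HALF/DUPLEX_FULL are 0/1, so once the
 * guard has rejected a spd with its low bit set or a dplx wider than
 * one bit, spd + dplx encodes each combination uniquely:
 *
 *	SPEED_10   + DUPLEX_HALF =   10    SPEED_10  + DUPLEX_FULL =  11
 *	SPEED_100  + DUPLEX_HALF =  100    SPEED_100 + DUPLEX_FULL = 101
 *	SPEED_1000 + DUPLEX_FULL = 1001
 */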
6645
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006646static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
6647 bool runtime)
Auke Kok9d5c8242008-01-24 02:22:38 -08006648{
6649 struct net_device *netdev = pci_get_drvdata(pdev);
6650 struct igb_adapter *adapter = netdev_priv(netdev);
6651 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07006652 u32 ctrl, rctl, status;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006653 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
Auke Kok9d5c8242008-01-24 02:22:38 -08006654#ifdef CONFIG_PM
6655 int retval = 0;
6656#endif
6657
6658 netif_device_detach(netdev);
6659
Alexander Duycka88f10e2008-07-08 15:13:38 -07006660 if (netif_running(netdev))
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006661 __igb_close(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07006662
Alexander Duyck047e0032009-10-27 15:49:27 +00006663 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006664
6665#ifdef CONFIG_PM
6666 retval = pci_save_state(pdev);
6667 if (retval)
6668 return retval;
6669#endif
6670
6671 status = rd32(E1000_STATUS);
6672 if (status & E1000_STATUS_LU)
6673 wufc &= ~E1000_WUFC_LNKC;
6674
6675 if (wufc) {
6676 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006677 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006678
6679 /* turn on all-multi mode if wake on multicast is enabled */
6680 if (wufc & E1000_WUFC_MC) {
6681 rctl = rd32(E1000_RCTL);
6682 rctl |= E1000_RCTL_MPE;
6683 wr32(E1000_RCTL, rctl);
6684 }
6685
6686 ctrl = rd32(E1000_CTRL);
6687 /* advertise wake from D3Cold */
6688 #define E1000_CTRL_ADVD3WUC 0x00100000
6689 /* phy power management enable */
6690 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6691 ctrl |= E1000_CTRL_ADVD3WUC;
6692 wr32(E1000_CTRL, ctrl);
6693
Auke Kok9d5c8242008-01-24 02:22:38 -08006694 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00006695 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08006696
6697 wr32(E1000_WUC, E1000_WUC_PME_EN);
6698 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08006699 } else {
6700 wr32(E1000_WUC, 0);
6701 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08006702 }
6703
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006704 *enable_wake = wufc || adapter->en_mng_pt;
6705 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00006706 igb_power_down_link(adapter);
6707 else
6708 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006709
6710 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6711 * would have already happened in close and is redundant. */
6712 igb_release_hw_control(adapter);
6713
6714 pci_disable_device(pdev);
6715
Auke Kok9d5c8242008-01-24 02:22:38 -08006716 return 0;
6717}
6718
6719#ifdef CONFIG_PM
Emil Tantilovd9dd9662012-01-28 08:10:35 +00006720#ifdef CONFIG_PM_SLEEP
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006721static int igb_suspend(struct device *dev)
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006722{
6723 int retval;
6724 bool wake;
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006725 struct pci_dev *pdev = to_pci_dev(dev);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006726
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006727 retval = __igb_shutdown(pdev, &wake, false);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006728 if (retval)
6729 return retval;
6730
6731 if (wake) {
6732 pci_prepare_to_sleep(pdev);
6733 } else {
6734 pci_wake_from_d3(pdev, false);
6735 pci_set_power_state(pdev, PCI_D3hot);
6736 }
6737
6738 return 0;
6739}
Emil Tantilovd9dd9662012-01-28 08:10:35 +00006740#endif /* CONFIG_PM_SLEEP */
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006741
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006742static int igb_resume(struct device *dev)
Auke Kok9d5c8242008-01-24 02:22:38 -08006743{
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006744 struct pci_dev *pdev = to_pci_dev(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006745 struct net_device *netdev = pci_get_drvdata(pdev);
6746 struct igb_adapter *adapter = netdev_priv(netdev);
6747 struct e1000_hw *hw = &adapter->hw;
6748 u32 err;
6749
6750 pci_set_power_state(pdev, PCI_D0);
6751 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006752 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006753
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006754 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006755 if (err) {
6756 dev_err(&pdev->dev,
6757 "igb: Cannot enable PCI device from suspend\n");
6758 return err;
6759 }
6760 pci_set_master(pdev);
6761
6762 pci_enable_wake(pdev, PCI_D3hot, 0);
6763 pci_enable_wake(pdev, PCI_D3cold, 0);
6764
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006765 if (!rtnl_is_locked()) {
6766 /*
6767 * shut up ASSERT_RTNL() warning in
6768 * netif_set_real_num_tx/rx_queues.
6769 */
6770 rtnl_lock();
6771 err = igb_init_interrupt_scheme(adapter);
6772 rtnl_unlock();
6773 } else {
6774 err = igb_init_interrupt_scheme(adapter);
6775 }
6776 if (err) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07006777 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6778 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08006779 }
6780
Auke Kok9d5c8242008-01-24 02:22:38 -08006781 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00006782
6783 /* let the f/w know that the h/w is now under the control of the
6784 * driver. */
6785 igb_get_hw_control(adapter);
6786
Auke Kok9d5c8242008-01-24 02:22:38 -08006787 wr32(E1000_WUS, ~0);
6788
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006789 if (netdev->flags & IFF_UP) {
6790 err = __igb_open(netdev, true);
Alexander Duycka88f10e2008-07-08 15:13:38 -07006791 if (err)
6792 return err;
6793 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006794
6795 netif_device_attach(netdev);
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006796 return 0;
6797}
6798
6799#ifdef CONFIG_PM_RUNTIME
6800static int igb_runtime_idle(struct device *dev)
6801{
6802 struct pci_dev *pdev = to_pci_dev(dev);
6803 struct net_device *netdev = pci_get_drvdata(pdev);
6804 struct igb_adapter *adapter = netdev_priv(netdev);
6805
6806 if (!igb_has_link(adapter))
6807 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6808
6809 return -EBUSY;
6810}
6811
6812static int igb_runtime_suspend(struct device *dev)
6813{
6814 struct pci_dev *pdev = to_pci_dev(dev);
6815 int retval;
6816 bool wake;
6817
 6818 retval = __igb_shutdown(pdev, &wake, true);
6819 if (retval)
6820 return retval;
6821
6822 if (wake) {
6823 pci_prepare_to_sleep(pdev);
6824 } else {
6825 pci_wake_from_d3(pdev, false);
6826 pci_set_power_state(pdev, PCI_D3hot);
6827 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006828
Auke Kok9d5c8242008-01-24 02:22:38 -08006829 return 0;
6830}
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006831
6832static int igb_runtime_resume(struct device *dev)
6833{
6834 return igb_resume(dev);
6835}
6836#endif /* CONFIG_PM_RUNTIME */
Auke Kok9d5c8242008-01-24 02:22:38 -08006837#endif
6838
6839static void igb_shutdown(struct pci_dev *pdev)
6840{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006841 bool wake;
6842
Yan, Zheng749ab2c2012-01-04 20:23:37 +00006843 __igb_shutdown(pdev, &wake, false);
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006844
6845 if (system_state == SYSTEM_POWER_OFF) {
6846 pci_wake_from_d3(pdev, wake);
6847 pci_set_power_state(pdev, PCI_D3hot);
6848 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006849}
6850
6851#ifdef CONFIG_NET_POLL_CONTROLLER
6852/*
6853 * Polling 'interrupt' - used by things like netconsole to send skbs
6854 * without having to re-enable interrupts. It's not called while
6855 * the interrupt routine is executing.
6856 */
6857static void igb_netpoll(struct net_device *netdev)
6858{
6859 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006860 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00006861 struct igb_q_vector *q_vector;
Auke Kok9d5c8242008-01-24 02:22:38 -08006862 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006863
Alexander Duyck047e0032009-10-27 15:49:27 +00006864 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00006865 q_vector = adapter->q_vector[i];
6866 if (adapter->msix_entries)
6867 wr32(E1000_EIMC, q_vector->eims_value);
6868 else
6869 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00006870 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006871 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006872}
6873#endif /* CONFIG_NET_POLL_CONTROLLER */
6874
6875/**
6876 * igb_io_error_detected - called when PCI error is detected
6877 * @pdev: Pointer to PCI device
6878 * @state: The current pci connection state
6879 *
6880 * This function is called after a PCI bus error affecting
6881 * this device has been detected.
6882 */
6883static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6884 pci_channel_state_t state)
6885{
6886 struct net_device *netdev = pci_get_drvdata(pdev);
6887 struct igb_adapter *adapter = netdev_priv(netdev);
6888
6889 netif_device_detach(netdev);
6890
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00006891 if (state == pci_channel_io_perm_failure)
6892 return PCI_ERS_RESULT_DISCONNECT;
6893
Auke Kok9d5c8242008-01-24 02:22:38 -08006894 if (netif_running(netdev))
6895 igb_down(adapter);
6896 pci_disable_device(pdev);
6897
 6898 /* Request a slot reset. */
6899 return PCI_ERS_RESULT_NEED_RESET;
6900}
6901
6902/**
6903 * igb_io_slot_reset - called after the pci bus has been reset.
6904 * @pdev: Pointer to PCI device
6905 *
6906 * Restart the card from scratch, as if from a cold-boot. Implementation
6907 * resembles the first-half of the igb_resume routine.
6908 */
6909static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6910{
6911 struct net_device *netdev = pci_get_drvdata(pdev);
6912 struct igb_adapter *adapter = netdev_priv(netdev);
6913 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08006914 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006915 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006916
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006917 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006918 dev_err(&pdev->dev,
6919 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08006920 result = PCI_ERS_RESULT_DISCONNECT;
6921 } else {
6922 pci_set_master(pdev);
6923 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006924 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08006925
6926 pci_enable_wake(pdev, PCI_D3hot, 0);
6927 pci_enable_wake(pdev, PCI_D3cold, 0);
6928
6929 igb_reset(adapter);
6930 wr32(E1000_WUS, ~0);
6931 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08006932 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006933
Jeff Kirsherea943d42008-12-11 20:34:19 -08006934 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6935 if (err) {
6936 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6937 "failed 0x%0x\n", err);
6938 /* non-fatal, continue */
6939 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006940
Alexander Duyck40a914f2008-11-27 00:24:37 -08006941 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08006942}
6943
6944/**
6945 * igb_io_resume - called when traffic can start flowing again.
6946 * @pdev: Pointer to PCI device
6947 *
6948 * This callback is called when the error recovery driver tells us that
6949 * its OK to resume normal operation. Implementation resembles the
6950 * second-half of the igb_resume routine.
6951 */
6952static void igb_io_resume(struct pci_dev *pdev)
6953{
6954 struct net_device *netdev = pci_get_drvdata(pdev);
6955 struct igb_adapter *adapter = netdev_priv(netdev);
6956
Auke Kok9d5c8242008-01-24 02:22:38 -08006957 if (netif_running(netdev)) {
6958 if (igb_up(adapter)) {
6959 dev_err(&pdev->dev, "igb_up failed after reset\n");
6960 return;
6961 }
6962 }
6963
6964 netif_device_attach(netdev);
6965
6966 /* let the f/w know that the h/w is now under the control of the
6967 * driver. */
6968 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006969}
6970
Alexander Duyck26ad9172009-10-05 06:32:49 +00006971static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6972 u8 qsel)
6973{
6974 u32 rar_low, rar_high;
6975 struct e1000_hw *hw = &adapter->hw;
6976
6977 /* HW expects these in little endian so we reverse the byte order
6978 * from network order (big endian) to little endian
6979 */
6980 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6981 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6982 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6983
6984 /* Indicate to hardware the Address is Valid. */
6985 rar_high |= E1000_RAH_AV;
6986
6987 if (hw->mac.type == e1000_82575)
6988 rar_high |= E1000_RAH_POOL_1 * qsel;
6989 else
6990 rar_high |= E1000_RAH_POOL_1 << qsel;
6991
6992 wr32(E1000_RAL(index), rar_low);
6993 wrfl();
6994 wr32(E1000_RAH(index), rar_high);
6995 wrfl();
6996}
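
/*
 * Worked example (editorial) of the packing above for the MAC address
 * 00:11:22:33:44:55, i.e. addr[0] = 0x00 ... addr[5] = 0x55:
 *
 *	rar_low  = 0x00 | 0x11 << 8 | 0x22 << 16 | 0x33 << 24 = 0x33221100
 *	rar_high = 0x44 | 0x55 << 8                            = 0x00005544
 *
 * E1000_RAH_AV is then OR'd into rar_high so the hardware treats the
 * entry as valid, and the pool bits select which queues may use it.
 */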
6997
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006998static int igb_set_vf_mac(struct igb_adapter *adapter,
6999 int vf, unsigned char *mac_addr)
7000{
7001 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00007002 /* VF MAC addresses start at the end of the receive address array and
 7003 * move towards the first entry; as a result a collision should not be possible */
7004 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007005
Alexander Duyck37680112009-02-19 20:40:30 -08007006 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007007
Alexander Duyck26ad9172009-10-05 06:32:49 +00007008 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007009
7010 return 0;
7011}
7012
Williams, Mitch A8151d292010-02-10 01:44:24 +00007013static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7014{
7015 struct igb_adapter *adapter = netdev_priv(netdev);
7016 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
7017 return -EINVAL;
7018 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7019 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7020 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
7021 " change effective.");
7022 if (test_bit(__IGB_DOWN, &adapter->state)) {
7023 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
7024 " but the PF device is not up.\n");
7025 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
7026 " attempting to use the VF device.\n");
7027 }
7028 return igb_set_vf_mac(adapter, vf, mac);
7029}
7030
Lior Levy17dc5662011-02-08 02:28:46 +00007031static int igb_link_mbps(int internal_link_speed)
7032{
7033 switch (internal_link_speed) {
7034 case SPEED_100:
7035 return 100;
7036 case SPEED_1000:
7037 return 1000;
7038 default:
7039 return 0;
7040 }
7041}
7042
7043static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7044 int link_speed)
7045{
7046 int rf_dec, rf_int;
7047 u32 bcnrc_val;
7048
7049 if (tx_rate != 0) {
7050 /* Calculate the rate factor values to set */
7051 rf_int = link_speed / tx_rate;
7052 rf_dec = (link_speed - (rf_int * tx_rate));
7053 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
7054
7055 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
7056 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
7057 E1000_RTTBCNRC_RF_INT_MASK);
7058 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7059 } else {
7060 bcnrc_val = 0;
7061 }
7062
7063 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
7064 wr32(E1000_RTTBCNRC, bcnrc_val);
7065}
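
/*
 * Worked example (editorial) of the rate factor computed above, with an
 * assumed link_speed of 1000 Mbps, tx_rate of 300 Mbps and the shift
 * E1000_RTTBCNRC_RF_INT_SHIFT taken to be 14 for illustration:
 *
 *	rf_int = 1000 / 300            = 3
 *	rf_dec = 1000 - 3 * 300        = 100
 *	rf_dec = 100 * (1 << 14) / 300 = 5461
 *
 * The programmed factor is 3 + 5461/16384 ~= 3.333, i.e. the ratio
 * link_speed / tx_rate in fixed point, which caps the VF queue at
 * roughly 300 Mbps of the 1000 Mbps line rate.
 */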
7066
7067static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7068{
7069 int actual_link_speed, i;
7070 bool reset_rate = false;
7071
7072 /* VF TX rate limit was not set or not supported */
7073 if ((adapter->vf_rate_link_speed == 0) ||
7074 (adapter->hw.mac.type != e1000_82576))
7075 return;
7076
7077 actual_link_speed = igb_link_mbps(adapter->link_speed);
7078 if (actual_link_speed != adapter->vf_rate_link_speed) {
7079 reset_rate = true;
7080 adapter->vf_rate_link_speed = 0;
7081 dev_info(&adapter->pdev->dev,
7082 "Link speed has been changed. VF Transmit "
7083 "rate is disabled\n");
7084 }
7085
7086 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7087 if (reset_rate)
7088 adapter->vf_data[i].tx_rate = 0;
7089
7090 igb_set_vf_rate_limit(&adapter->hw, i,
7091 adapter->vf_data[i].tx_rate,
7092 actual_link_speed);
7093 }
7094}
7095
Williams, Mitch A8151d292010-02-10 01:44:24 +00007096static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7097{
Lior Levy17dc5662011-02-08 02:28:46 +00007098 struct igb_adapter *adapter = netdev_priv(netdev);
7099 struct e1000_hw *hw = &adapter->hw;
7100 int actual_link_speed;
7101
7102 if (hw->mac.type != e1000_82576)
7103 return -EOPNOTSUPP;
7104
7105 actual_link_speed = igb_link_mbps(adapter->link_speed);
7106 if ((vf >= adapter->vfs_allocated_count) ||
7107 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
7108 (tx_rate < 0) || (tx_rate > actual_link_speed))
7109 return -EINVAL;
7110
7111 adapter->vf_rate_link_speed = actual_link_speed;
7112 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
7113 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
7114
7115 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007116}
7117
7118static int igb_ndo_get_vf_config(struct net_device *netdev,
7119 int vf, struct ifla_vf_info *ivi)
7120{
7121 struct igb_adapter *adapter = netdev_priv(netdev);
7122 if (vf >= adapter->vfs_allocated_count)
7123 return -EINVAL;
7124 ivi->vf = vf;
7125 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00007126 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00007127 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7128 ivi->qos = adapter->vf_data[vf].pf_qos;
7129 return 0;
7130}
7131
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007132static void igb_vmm_control(struct igb_adapter *adapter)
7133{
7134 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00007135 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007136
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007137 switch (hw->mac.type) {
7138 case e1000_82575:
7139 default:
7140 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007141 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007142 case e1000_82576:
7143 /* notify HW that the MAC is adding vlan tags */
7144 reg = rd32(E1000_DTXCTL);
7145 reg |= E1000_DTXCTL_VLAN_ADDED;
7146 wr32(E1000_DTXCTL, reg);
 /* fall through */
 7147 case e1000_82580:
7148 /* enable replication vlan tag stripping */
7149 reg = rd32(E1000_RPLOLR);
7150 reg |= E1000_RPLOLR_STRVLAN;
7151 wr32(E1000_RPLOLR, reg);
 /* fall through */
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00007152 case e1000_i350:
7153 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00007154 break;
7155 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00007156
Alexander Duyckd4960302009-10-27 15:53:45 +00007157 if (adapter->vfs_allocated_count) {
7158 igb_vmdq_set_loopback_pf(hw, true);
7159 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00007160 igb_vmdq_set_anti_spoofing_pf(hw, true,
7161 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00007162 } else {
7163 igb_vmdq_set_loopback_pf(hw, false);
7164 igb_vmdq_set_replication_pf(hw, false);
7165 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08007166}
7167
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007168static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7169{
7170 struct e1000_hw *hw = &adapter->hw;
7171 u32 dmac_thr;
7172 u16 hwm;
7173
7174 if (hw->mac.type > e1000_82580) {
7175 if (adapter->flags & IGB_FLAG_DMAC) {
7176 u32 reg;
7177
7178 /* force threshold to 0. */
7179 wr32(E1000_DMCTXTH, 0);
7180
7181 /*
Matthew Vicke8c626e2011-11-17 08:33:12 +00007182 * DMA Coalescing high water mark needs to be greater
7183 * than the Rx threshold. Set hwm to PBA - max frame
7184 * size in 16B units, capping it at PBA - 6KB.
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007185 */
Matthew Vicke8c626e2011-11-17 08:33:12 +00007186 hwm = 64 * pba - adapter->max_frame_size / 16;
7187 if (hwm < 64 * (pba - 6))
7188 hwm = 64 * (pba - 6);
7189 reg = rd32(E1000_FCRTC);
7190 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
7191 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
7192 & E1000_FCRTC_RTH_COAL_MASK);
7193 wr32(E1000_FCRTC, reg);
7194
7195 /*
7196 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
7197 * frame size, capping it at PBA - 10KB.
7198 */
7199 dmac_thr = pba - adapter->max_frame_size / 512;
7200 if (dmac_thr < pba - 10)
7201 dmac_thr = pba - 10;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007202 reg = rd32(E1000_DMACR);
7203 reg &= ~E1000_DMACR_DMACTHR_MASK;
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007204 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
7205 & E1000_DMACR_DMACTHR_MASK);
7206
 7207 /* transition to L0s or L1 if available */
7208 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
7209
 7210 /* watchdog timer = 1000 usec, expressed in 32 usec intervals */
7211 reg |= (1000 >> 5);
7212 wr32(E1000_DMACR, reg);
7213
 7214 /*
 7215 * no lower threshold to disable
 7216 * coalescing (smart FIFO); UTRESH = 0
 7217 */
7218 wr32(E1000_DMCRTRH, 0);
Carolyn Wybornyb6e0c412011-10-13 17:29:59 +00007219
7220 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
7221
7222 wr32(E1000_DMCTLX, reg);
7223
 7224 /*
 7225 * free space in the Tx packet buffer needed to wake
 7226 * from DMA coalescing
 7227 */
7228 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7229 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7230
 7231 /*
 7232 * let DMA coalescing control the low power
 7233 * state decision
 7234 */
7235 reg = rd32(E1000_PCIEMISC);
7236 reg &= ~E1000_PCIEMISC_LX_DECISION;
7237 wr32(E1000_PCIEMISC, reg);
7238 } /* endif adapter->dmac is not disabled */
7239 } else if (hw->mac.type == e1000_82580) {
7240 u32 reg = rd32(E1000_PCIEMISC);
7241 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
7242 wr32(E1000_DMACR, 0);
7243 }
7244}
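
/*
 * Worked example (editorial) of the thresholds above, assuming a PBA of
 * 34 KB and a max_frame_size of 1522 bytes (both values illustrative):
 *
 *	hwm      = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081  (16 B units)
 *	floor at   64 * (34 - 6)       = 1792              (PBA - 6 KB)
 *	dmac_thr = 34 - 1522 / 512     = 34 - 2    = 32    (KB)
 *	floor at   34 - 10             = 24                (PBA - 10 KB)
 *
 * Neither value hits its floor, so the high water mark sits roughly one
 * maximum frame below the top of the packet buffer and the Rx threshold
 * two maximum frames below it.
 */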
7245
Auke Kok9d5c8242008-01-24 02:22:38 -08007246/* igb_main.c */