/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
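
/*
 * Note: with the pr_fmt() above, every pr_*() call in this file is
 * prefixed with the module name, e.g. pr_info("loading\n") prints
 * "igb: loading".
 */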

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 2
#define BUILD 10
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
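/* For reference: with the values above, DRV_VERSION expands to "3.2.10-k". */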
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type >= e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
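
/*
 * Illustrative layout of the raw 64-bit stamp assembled above (shift is
 * 0 on pre-82580 parts, IGB_82580_TSYNC_SHIFT otherwise):
 *   bits [shift-1:0]        SYSTIMR >> 8 (82580 and newer only)
 *   bits [shift+31:shift]   SYSTIML
 *   bits [63:shift+32]      SYSTIMH (high bits truncated to fit a u64)
 */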
578
Auke Kok9d5c8242008-01-24 02:22:38 -0800579/**
Alexander Duyckc0410762010-03-25 13:10:08 +0000580 * igb_get_hw_dev - return device
Auke Kok9d5c8242008-01-24 02:22:38 -0800581 * used by hardware layer to print debugging information
582 **/
Alexander Duyckc0410762010-03-25 13:10:08 +0000583struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
Auke Kok9d5c8242008-01-24 02:22:38 -0800584{
585 struct igb_adapter *adapter = hw->back;
Alexander Duyckc0410762010-03-25 13:10:08 +0000586 return adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -0800587}
Patrick Ohly38c845c2009-02-12 05:03:41 +0000588
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
		/* fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}
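
/*
 * Worked example of Q_IDX_82576() above: for i = 0..5 it yields
 * 0, 8, 1, 9, 2, 10, i.e. the PF's rings are interleaved into the
 * queue pairs left free by the VFs.
 */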

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
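
/*
 * Worked example (82576 scheme, as used in igb_assign_vector() below):
 * Rx queue 10 lands in row (10 & 0x7) = 2 at byte offset
 * ((10 & 0x8) << 1) = 16, and its Tx counterpart at offset 16 + 8 = 24,
 * so each 32-bit IVAR entry carries four 8-bit cause allocations.
 */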

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
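
/*
 * Illustration of the 82575 branch above: a vector serving Rx queue 1
 * and Tx queue 1 ends up with
 *   msixbm = (E1000_EICR_RX_QUEUE0 << 1) | (E1000_EICR_TX_QUEUE0 << 1),
 * and that same bitmask doubles as the vector's bits in EICR/EIMS/EIMC.
 */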

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}
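
/*
 * Vector-count example for the function above (values illustrative):
 * with rss_queues = 4 and IGB_FLAG_QUEUE_PAIRS clear, numvecs is
 * 4 (rx) + 4 (tx) + 1 (link) = 9; with queue pairs set it is 4 + 1 = 5.
 */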

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
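
/*
 * Mapping example for the function above: with 4 Rx and 4 Tx queues but
 * only 4 q_vectors (queue pairs), vector i serves both rx_ring[i] and
 * tx_ring[i]; with 8 q_vectors each ring gets a vector of its own.
 */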

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
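/*
 * Note: the fallback ladder above is MSI-X -> MSI -> legacy INTx. When the
 * MSI-X request fails, the multi-queue setup is torn down and a single
 * Tx/Rx queue pair on one q_vector is reallocated before retrying, since
 * MSI and legacy interrupts cannot target per-queue vectors.
 */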

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
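/*
 * Once the mask writes have been flushed, synchronize_irq() is invoked for
 * every vector so that any handler still running on another CPU completes
 * before igb_irq_disable() returns; callers may then safely tear down the
 * resources those handlers touch.
 */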

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean, so a completely
	 * full ring can be told apart from an empty one */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * Tx packet, but we don't include the Ethernet FCS because
		 * hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
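	/* Worked example (illustrative figures, assuming the 82575 default
	 * E1000_PBA_34K gives pba = 34 KB and max_frame_size = 1522):
	 * 90% of 34816 bytes is 31334; 34816 - 2 * 1522 = 31772; the lower
	 * value, 31334, masked to 16-byte granularity below, yields a high
	 * water mark of 31328 bytes. */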
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	igb_init_dmac(adapter, pba);
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static netdev_features_t igb_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int igb_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	return 0;
}
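/*
 * In practice this means toggling Rx VLAN acceleration from userspace
 * (e.g. "ethtool -K ethX rxvlan off") also toggles the Tx flag:
 * igb_fix_features() forces the two bits to track each other before
 * igb_set_features() applies the change through igb_vlan_mode().
 */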

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}
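	/* Note: a 64-bit DMA mask is attempted first and pci_using_dac
	 * records the outcome; it is consulted further down to decide
	 * whether NETIF_F_HIGHDMA can be advertised on the netdev. */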

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/*
	 * features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_IPV6_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter.
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		  "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		  "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * prevent the watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (!igb_check_vf_assignment(adapter)) {
			pci_disable_sriov(pdev);
			msleep(500);
		} else {
			dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
		}

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;
	int old_vfs = igb_find_enabled_vfs(adapter);
	int i;

	if (old_vfs) {
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
			 "max_vfs setting of %d\n", old_vfs, max_vfs);
		adapter->vfs_allocated_count = old_vfs;
	}

	if (!adapter->vfs_allocated_count)
		return;

	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
				   sizeof(struct vf_data_storage), GFP_KERNEL);
	/* if allocation failed then we do not support SR-IOV */
	if (!adapter->vf_data) {
		adapter->vfs_allocated_count = 0;
		dev_err(&pdev->dev, "Unable to allocate memory for VF "
			"Data Storage\n");
		goto out;
	}

	if (!old_vfs) {
		if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
			goto err_out;
	}
	dev_info(&pdev->dev, "%d VFs allocated\n",
		 adapter->vfs_allocated_count);
	for (i = 0; i < adapter->vfs_allocated_count; i++)
		igb_vf_configure(adapter, i);

	/* DMA Coalescing is not supported in IOV mode. */
	adapter->flags &= ~IGB_FLAG_DMAC;
	goto out;
err_out:
	kfree(adapter->vf_data);
	adapter->vf_data = NULL;
	adapter->vfs_allocated_count = 0;
out:
	return;
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted. Instead we need to shift
		 * the registers to generate a 64bit timer value. As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits. As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->node = -1;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
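	/* Example: with rss_queues = 8 the pairing flag makes each q_vector
	 * service one Tx and one Rx ring (see igb_map_ring_to_vector above),
	 * so the rings consume 8 shared q_vectors instead of 16 dedicated
	 * ones. */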

	/* Setup and initialize a copy of the hw vlan table array */
	adapter->shadow_vfta = kzalloc(sizeof(u32) *
				E1000_VLAN_FILTER_TBL_SIZE,
				GFP_ATOMIC);

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
2462
2463/**
2464 * igb_open - Called when a network interface is made active
2465 * @netdev: network interface device structure
2466 *
2467 * Returns 0 on success, negative value on failure
2468 *
2469 * The open entry point is called when a network interface is made
2470 * active by the system (IFF_UP). At this point all resources needed
2471 * for transmit and receive operations are allocated, the interrupt
2472 * handler is registered with the OS, the watchdog timer is started,
2473 * and the stack is notified that the interface is ready.
2474 **/
2475static int igb_open(struct net_device *netdev)
2476{
2477 struct igb_adapter *adapter = netdev_priv(netdev);
2478 struct e1000_hw *hw = &adapter->hw;
2479 int err;
2480 int i;
2481
2482 /* disallow open during test */
2483 if (test_bit(__IGB_TESTING, &adapter->state))
2484 return -EBUSY;
2485
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002486 netif_carrier_off(netdev);
2487
Auke Kok9d5c8242008-01-24 02:22:38 -08002488 /* allocate transmit descriptors */
2489 err = igb_setup_all_tx_resources(adapter);
2490 if (err)
2491 goto err_setup_tx;
2492
2493 /* allocate receive descriptors */
2494 err = igb_setup_all_rx_resources(adapter);
2495 if (err)
2496 goto err_setup_rx;
2497
Nick Nunley88a268c2010-02-17 01:01:59 +00002498 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002499
Auke Kok9d5c8242008-01-24 02:22:38 -08002500 /* before we allocate an interrupt, we must be ready to handle it.
2501 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2502 * as soon as we call pci_request_irq, so we have to setup our
2503 * clean_rx handler before we do so. */
2504 igb_configure(adapter);
2505
2506 err = igb_request_irq(adapter);
2507 if (err)
2508 goto err_req_irq;
2509
2510 /* From here on the code is the same as igb_up() */
2511 clear_bit(__IGB_DOWN, &adapter->state);
2512
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002513 for (i = 0; i < adapter->num_q_vectors; i++)
2514 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002515
2516 /* Clear any pending interrupts. */
2517 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002518
2519 igb_irq_enable(adapter);
2520
Alexander Duyckd4960302009-10-27 15:53:45 +00002521 /* notify VFs that reset has been completed */
2522 if (adapter->vfs_allocated_count) {
2523 u32 reg_data = rd32(E1000_CTRL_EXT);
2524 reg_data |= E1000_CTRL_EXT_PFRSTD;
2525 wr32(E1000_CTRL_EXT, reg_data);
2526 }
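	/*
	 * CTRL_EXT.PFRSTD is the "PF reset done" indication that VF
	 * drivers poll for after a reset; it is raised only here, once
	 * interrupts are enabled and the PF is ready to service mailbox
	 * requests from the VFs.
	 */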

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size;

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, tx_ring->numa_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev,
						   tx_ring->size,
						   &tx_ring->dma,
						   GFP_KERNEL);
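	/*
	 * Both allocations above first try the ring's preferred NUMA node
	 * (vzalloc_node()/set_dev_node()) and fall back to any node on
	 * failure; the queue is serviced mostly by one CPU, so node-local
	 * memory avoids cross-node traffic.  igb_setup_rx_resources()
	 * uses the same pattern.
	 */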

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;
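	/*
	 * TXDCTL packs the descriptor prefetch, host and write-back
	 * thresholds into bits 5:0, 13:8 and 21:16 respectively, hence
	 * the 8- and 16-bit shifts above.
	 */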

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size, desc_len;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, rx_ring->numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev,
						   rx_ring->size,
						   &rx_ring->dma,
						   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}
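	/*
	 * The 40-byte RSS key is programmed as ten 32-bit RSSRK registers,
	 * least-significant byte first: RSSRK(0) above, for example, ends
	 * up holding 0xda565a6d.
	 */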

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			break;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}
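	/*
	 * The loop above fills the 128-entry redirection table four bytes
	 * at a time (32 E1000_RETA registers).  For example, with
	 * num_rx_queues == 2 and shift == 0 the entries simply alternate
	 * 0, 1, 0, 1, ... so the low bits of each packet's RSS hash select
	 * one of the two queues.
	 */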

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to allow reception of frames up to max_frame_size,
	 * which is enforced through RLPML */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an untrusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if this is a VF (not the PF) and it has VLANs enabled,
	 * increase the size to make room for a VLAN tag */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer, so if we are older
	 * than that we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;  /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);
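	/*
	 * SRRCTL now describes a header-split layout: IGB_RX_HDR_LEN bytes
	 * for the header buffer (BSIZEHDRSIZE), half a page (capped at
	 * 16KB) for the packet buffer (BSIZEPKT), plus the optional
	 * timestamp and drop-enable policy chosen above.
	 */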

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
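	/*
	 * A buffer that still owns an skb is the head of a frame and was
	 * mapped with dma_map_single(); trailing fragment buffers carry a
	 * page mapping instead.  The unmap call must match the original
	 * map call, hence the two branches below.
	 */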
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	} else if (tx_buffer->dma) {
		dma_unmap_page(ring->dev,
			       tx_buffer->dma,
			       tx_buffer->length,
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	tx_buffer->dma = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* zero out any remaining RAR entries not used above, in reverse
	 * order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA; if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers; if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (!(wvbr = rd32(E1000_WVBR)))
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8
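/*
 * WVBR latches one spoofed-packet event bit per transmit queue: bit n
 * covers the first queue of VF n and bit n + 8 its second queue, which
 * is what the staggered offset below accounts for.
 */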

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				"Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
			       "Duplex, Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full" : "Half",
			       (ctrl & E1000_CTRL_TFCE) &&
			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
			       (ctrl & E1000_CTRL_RFCE) ? "RX" :
			       (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
						     E1000_THSTAT_LINK_THROTTLE)) {
				netdev_info(netdev, "The network adapter link "
					    "speed was downshifted because it "
					    "overheated\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
						     E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was "
					   "stopped because it overheated\n");
			}

			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
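/*
 * The ranges map roughly to target interrupt rates of 70000
 * (lowest_latency), 20000 (low_latency) and 4000 (bulk_latency)
 * ints/sec; see the "aka" notes in igb_update_itr() below.
 */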

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 *       receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec (the IGB_4K_ITR timer value).
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;
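	/*
	 * Worked example: full-sized 1500-byte frames average 1524 bytes
	 * on the wire, pass both bounds above untouched and land here,
	 * giving an ITR value of 762; 600-byte frames instead take the
	 * mid-size boost, (600 + 24) / 3 = 208.  Larger ITR values mean
	 * fewer interrupts per second.
	 */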
3810
Alexander Duyck0ba82992011-08-26 07:45:47 +00003811 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3812 if (new_val < IGB_20K_ITR &&
3813 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3814 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3815 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003816
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003817set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003818 if (new_val != q_vector->itr_val) {
3819 q_vector->itr_val = new_val;
3820 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003821 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003822clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003823 q_vector->rx.total_bytes = 0;
3824 q_vector->rx.total_packets = 0;
3825 q_vector->tx.total_bytes = 0;
3826 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003827}
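
/* Worked example (editorial; assumes the igb.h values IGB_70K_ITR == 56
 * and IGB_20K_ITR == 196): bulk traffic of 1514-byte frames gives
 * avg_wire_size = 1514 + 24 = 1538, which is outside the 300..1200 boost
 * window, so new_val = 1538 / 2 = 769 -- a long EITR interval, i.e. a low
 * interrupt rate.  Minimum 64-byte frames give (64 + 24) / 2 = 44, shorter
 * even than IGB_70K_ITR, which conservative mode would raise to 196.
 */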

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				itrval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				itrval = bulk_latency;
			} else if ((packets > 35)) {
				itrval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}
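
/* Worked example (editorial): an interval of 40 packets / 12000 bytes seen
 * while in low_latency has bytes > 10000, bytes/packets = 300 (neither
 * TSO-sized nor > 1200), and packets > 35, so itrval drops to
 * lowest_latency.  Four 9000-byte frames (36000 bytes) instead trip the
 * bytes/packets > 8000 test and move the ring to bulk_latency.
 */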

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) :
			  new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
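
/* Editorial notes, assuming the igb.h values IGB_70K_ITR == 56,
 * IGB_20K_ITR == 196 and IGB_4K_ITR == 980: EITR counts are roughly
 * 0.25 usec each, so 196 ~= 49 usec ~= 20000 ints/sec.  Also, since
 * (new * old) / (new + old / 4) never exceeds old (and old < new in this
 * branch), the max() above always evaluates to new_itr, so the
 * "intermediate steps" bias described in the comment does not engage.
 */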

void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
		     u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
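
/* Editorial packing sketch (assuming the E1000_ADVTXD_* shifts: VLAN tag
 * at bit 16, MACLEN at bit 9, IPLEN in the low bits): an untagged frame
 * with a 14-byte Ethernet header and a 20-byte IPv4 header encodes
 * vlan_macip_lens = (14 << 9) | 20 = 0x1C14.
 */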

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}
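
/* Worked example (editorial): a TSO skb with 5792 bytes of TCP payload,
 * gso_size 1448 and 66 header bytes (14 Ethernet + 20 IP + 32 TCP) has
 * gso_segs = 4 and *hdr_len = 66, so bytecount grows by 3 * 66 = 198 --
 * the header copies the hardware will generate for segments 2..4.
 */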

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}
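
/* Worked example (editorial, assuming E1000_ADVTXD_L4LEN_SHIFT == 8): a
 * CHECKSUM_PARTIAL UDP/IPv4 packet takes the IPv4 arm of the first switch
 * and the UDP arm of the second, so type_tucmd carries only TUCMD.IPV4 and
 * mss_l4len_idx = sizeof(struct udphdr) << 8 = 0x800; no MSS is encoded
 * because nothing is being segmented.
 */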

static __le32 igb_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
				      E1000_ADVTXD_DCMD_IFCS |
				      E1000_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);

	/* set timestamp bit if present */
	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);

	/* set segmentation bits for TSO */
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring if any offload is enabled */
	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert IPv4 checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	}

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
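
/* Editorial note: paylen counts payload only; for the TSO example above it
 * is skb->len - hdr_len = 5858 - 66 = 5792, placed in the upper bits of
 * olinfo_status via E1000_ADVTXD_PAYLEN_SHIFT.
 */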

/*
 * The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR	15
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
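
/* Editorial example: a contiguous 48 KB (49152-byte) buffer therefore
 * consumes two data descriptors in igb_tx_map() below -- 32768 bytes,
 * then the remaining 16384.
 */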

static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer_info;
	union e1000_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	__le32 cmd_type;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	first->length = size;
	first->dma = dma;
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.olinfo_status = 0;
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = size;
		tx_buffer_info->dma = dma;

		tx_desc->read.olinfo_status = 0;
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
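
/* Editorial sketch of the space check (assuming the usual igb_desc_unused()
 * helper, count + next_to_clean - next_to_use - 1 modulo the ring size):
 * with count = 256, next_to_use = 250 and next_to_clean = 10, unused =
 * 256 + 10 - 250 - 1 = 15, so a request for MAX_SKB_FRAGS + 4 descriptors
 * stops the queue until Tx cleanup advances next_to_clean.
 */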

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	igb_tx_map(tx_ring, first, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;

out_drop:
	igb_unmap_and_free_tx_resource(tx_ring, first);

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}
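
/* Editorial example: with num_tx_queues = 4, queue_mapping 6 folds to
 * 6 % 4 = ring 2, so queues selected by the stack beyond the allocated
 * ring count still map onto a valid Tx ring.
 */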

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
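
/* Editorial arithmetic check: the framing overhead is ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4) = 22 bytes, so a 1500-byte MTU yields
 * max_frame 1522, and the 9216-byte limit quoted in the error message is
 * exactly MAX_STD_JUMBO_FRAME_SIZE - 22 (9238 - 22 = 9216).
 */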

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pvfdev;
	unsigned int device_id;
	u16 thisvf_devfn;

	random_ether_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
			       (pdev->devfn & 1);
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
			       (pdev->devfn & 3);
		break;
	default:
		device_id = 0;
		thisvf_devfn = 0;
		break;
	}

	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == thisvf_devfn)
			break;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	if (pvfdev)
		adapter->vf_data[vf].vfdev = pvfdev;
	else
		dev_err(&pdev->dev,
			"Couldn't find pci dev ptr for VF %4.4x\n",
			thisvf_devfn);
	return pvfdev != NULL;
}
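
/* Editorial example of the devfn arithmetic: on an 82576 whose PF sits at
 * devfn 0x00, VF 3 is expected at (0x00 + 0x80 + (3 << 1)) | (0x00 & 1) =
 * 0x86; the OR with the PF's low devfn bits preserves the parity used by
 * a second-function PF.
 */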

static int igb_find_enabled_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pvfdev;
	u16 vf_devfn = 0;
	u16 vf_stride;
	unsigned int device_id;
	int vfs_found = 0;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		vf_stride = 2;
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		vf_stride = 4;
		break;
	default:
		device_id = 0;
		vf_stride = 0;
		break;
	}

	vf_devfn = pdev->devfn + 0x80;
	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == vf_devfn)
			vfs_found++;
		vf_devfn += vf_stride;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	return vfs_found;
}

static int igb_check_vf_assignment(struct igb_adapter *adapter)
{
	int i;
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (adapter->vf_data[i].vfdev) {
			if (adapter->vf_data[i].vfdev->dev_flags &
			    PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}
	}
	return false;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
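
/* Editorial note on the limit of 30: assuming the 16-dword VF mailbox
 * (E1000_VFMAILBOX_SIZE), one dword carries the message header, leaving
 * 15 dwords = 30 u16 hash values -- hence vf_mc_hashes[] holds at most 30
 * entries, and VFs requesting more fall back to multicast promiscuous mode
 * in igb_restore_vf_multicasts()/igb_set_vf_promisc().
 */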

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}
5094
5095static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5096{
5097 struct e1000_hw *hw = &adapter->hw;
5098 u32 pool_mask, reg, vid;
5099 int i;
5100
5101 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5102
5103 /* Find the vlan filter for this id */
5104 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5105 reg = rd32(E1000_VLVF(i));
5106
5107 /* remove the vf from the pool */
5108 reg &= ~pool_mask;
5109
5110 /* if pool is empty then remove entry from vfta */
5111 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5112 (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN id before clearing the entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
5115 igb_vfta_set(hw, vid, false);
5116 }
5117
5118 wr32(E1000_VLVF(i), reg);
5119 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005120
5121 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005122}
5123
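/*
 * Illustrative sketch (not built): each VLVF entry pairs a 12-bit VLAN id
 * with a per-pool membership bitmap starting at E1000_VLVF_POOLSEL_SHIFT,
 * which is what lets igb_clear_vf_vfta() above drop a single VF from a
 * shared VLAN filter.  The helper name is hypothetical.
 */
#if 0
static bool example_vlvf_pool_member(u32 vlvf, u32 vf)
{
	return !!(vlvf & (1 << (E1000_VLVF_POOLSEL_SHIFT + vf)));
}
#endif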
5124static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5125{
5126 struct e1000_hw *hw = &adapter->hw;
5127 u32 reg, i;
5128
Alexander Duyck51466232009-10-27 23:47:35 +00005129 /* The vlvf table only exists on 82576 hardware and newer */
5130 if (hw->mac.type < e1000_82576)
5131 return -1;
5132
5133 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005134 if (!adapter->vfs_allocated_count)
5135 return -1;
5136
5137 /* Find the vlan filter for this id */
5138 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5139 reg = rd32(E1000_VLVF(i));
5140 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5141 vid == (reg & E1000_VLVF_VLANID_MASK))
5142 break;
5143 }
5144
5145 if (add) {
5146 if (i == E1000_VLVF_ARRAY_SIZE) {
5147 /* Did not find a matching VLAN ID entry that was
5148 * enabled. Search for a free filter entry, i.e.
5149 * one without the enable bit set
5150 */
5151 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5152 reg = rd32(E1000_VLVF(i));
5153 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5154 break;
5155 }
5156 }
5157 if (i < E1000_VLVF_ARRAY_SIZE) {
5158 /* Found an enabled/available entry */
5159 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5160
5161 /* if !enabled we need to set this up in vfta */
5162 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005163 /* add VID to filter table */
5164 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005165 reg |= E1000_VLVF_VLANID_ENABLE;
5166 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005167 reg &= ~E1000_VLVF_VLANID_MASK;
5168 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005169 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005170
5171 /* do not modify RLPML for PF devices */
5172 if (vf >= adapter->vfs_allocated_count)
5173 return 0;
5174
5175 if (!adapter->vf_data[vf].vlans_enabled) {
5176 u32 size;
5177 reg = rd32(E1000_VMOLR(vf));
5178 size = reg & E1000_VMOLR_RLPML_MASK;
5179 size += 4;
5180 reg &= ~E1000_VMOLR_RLPML_MASK;
5181 reg |= size;
5182 wr32(E1000_VMOLR(vf), reg);
5183 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005184
Alexander Duyck51466232009-10-27 23:47:35 +00005185 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005186 }
5187 } else {
5188 if (i < E1000_VLVF_ARRAY_SIZE) {
5189 /* remove vf from the pool */
5190 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5191 /* if pool is empty then remove entry from vfta */
5192 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5193 reg = 0;
5194 igb_vfta_set(hw, vid, false);
5195 }
5196 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005197
5198 /* do not modify RLPML for PF devices */
5199 if (vf >= adapter->vfs_allocated_count)
5200 return 0;
5201
5202 adapter->vf_data[vf].vlans_enabled--;
5203 if (!adapter->vf_data[vf].vlans_enabled) {
5204 u32 size;
5205 reg = rd32(E1000_VMOLR(vf));
5206 size = reg & E1000_VMOLR_RLPML_MASK;
5207 size -= 4;
5208 reg &= ~E1000_VMOLR_RLPML_MASK;
5209 reg |= size;
5210 wr32(E1000_VMOLR(vf), reg);
5211 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005212 }
5213 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005214 return 0;
5215}
5216
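/*
 * Illustrative sketch (not built): igb_vlvf_set() above grows or shrinks
 * the VF's maximum receive packet size (the RLPML field of VMOLR) by 4
 * bytes as the first VLAN is added or the last one removed, making room
 * for one 802.1Q tag.  The helper name is hypothetical.
 */
#if 0
static u32 example_adjust_rlpml(u32 vmolr, bool grow)
{
	u32 size = vmolr & E1000_VMOLR_RLPML_MASK;

	if (grow)
		size += 4;	/* VLAN_HLEN */
	else
		size -= 4;
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	return vmolr | size;
}
#endif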
5217static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5218{
5219 struct e1000_hw *hw = &adapter->hw;
5220
5221 if (vid)
5222 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5223 else
5224 wr32(E1000_VMVIR(vf), 0);
5225}
5226
5227static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5228 int vf, u16 vlan, u8 qos)
5229{
5230 int err = 0;
5231 struct igb_adapter *adapter = netdev_priv(netdev);
5232
5233 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5234 return -EINVAL;
5235 if (vlan || qos) {
5236 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5237 if (err)
5238 goto out;
5239 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5240 igb_set_vmolr(adapter, vf, !vlan);
5241 adapter->vf_data[vf].pf_vlan = vlan;
5242 adapter->vf_data[vf].pf_qos = qos;
5243 dev_info(&adapter->pdev->dev,
5244 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5245 if (test_bit(__IGB_DOWN, &adapter->state)) {
5246 dev_warn(&adapter->pdev->dev,
5247 "The VF VLAN has been set,"
5248 " but the PF device is not up.\n");
5249 dev_warn(&adapter->pdev->dev,
5250 "Bring the PF device up before"
5251 " attempting to use the VF device.\n");
5252 }
5253 } else {
5254 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5255 false, vf);
5256 igb_set_vmvir(adapter, vlan, vf);
5257 igb_set_vmolr(adapter, vf, true);
5258 adapter->vf_data[vf].pf_vlan = 0;
5259 adapter->vf_data[vf].pf_qos = 0;
5260 }
5261out:
5262 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005263}
5264
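/*
 * Usage note: igb_ndo_set_vf_vlan() is reached through the rtnetlink
 * IFLA_VF_VLAN attribute, e.g. (assuming eth0 is the PF interface):
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *
 * Passing "vlan 0 qos 0" takes the else branch above and removes the
 * administratively set tag again.
 */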
5265static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5266{
5267 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5268 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5269
5270 return igb_vlvf_set(adapter, vid, add, vf);
5271}
5272
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005273static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005274{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005275 /* clear flags - except flag that indicates PF has set the MAC */
5276 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005277 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005278
5279 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005280 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005281
	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005290
5291 /* reset multicast table array for vf */
5292 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5293
5294 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005295 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005296}
5297
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005298static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5299{
5300 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5301
5302 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005303 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5304 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005305
5306 /* process remaining reset events */
5307 igb_vf_reset(adapter, vf);
5308}
5309
5310static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005311{
5312 struct e1000_hw *hw = &adapter->hw;
5313 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005314 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005315 u32 reg, msgbuf[3];
5316 u8 *addr = (u8 *)(&msgbuf[1]);
5317
5318 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005319 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005320
5321 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005322 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005323
5324 /* enable transmit and receive for vf */
5325 reg = rd32(E1000_VFTE);
5326 wr32(E1000_VFTE, reg | (1 << vf));
5327 reg = rd32(E1000_VFRE);
5328 wr32(E1000_VFRE, reg | (1 << vf));
5329
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005330 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005331
5332 /* reply to reset with ack and vf mac address */
5333 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5334 memcpy(addr, vf_mac, 6);
5335 igb_write_mbx(hw, msgbuf, 3, vf);
5336}
5337
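/*
 * Illustrative sketch (not built): the three-word reply sent back by
 * igb_vf_reset_msg() above.  Word 0 acks the reset; words 1-2 carry the
 * 6-byte MAC address the VF should adopt.  The helper name is
 * hypothetical.
 */
#if 0
static void example_build_reset_reply(u32 *msgbuf, const u8 *mac)
{
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(&msgbuf[1], mac, 6);
}
#endif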
5338static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5339{
Greg Rosede42edd2010-07-01 13:39:23 +00005340 /*
5341 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32-bit word of the msg array
5343 */
	unsigned char *addr = (unsigned char *)&msg[1];
5345 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005346
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005347 if (is_valid_ether_addr(addr))
5348 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005349
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005350 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005351}
5352
5353static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5354{
5355 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005356 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005357 u32 msg = E1000_VT_MSGTYPE_NACK;
5358
5359 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005360 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5361 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005362 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005363 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005364 }
5365}
5366
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005367static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005368{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005369 struct pci_dev *pdev = adapter->pdev;
5370 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005371 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005372 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005373 s32 retval;
5374
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005375 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005376
Alexander Duyckfef45f42009-12-11 22:57:34 -08005377 if (retval) {
5378 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005379 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005380 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5381 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5382 return;
5383 goto out;
5384 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005385
5386 /* this is a message we already processed, do nothing */
5387 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005388 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005389
5390 /*
5391 * until the vf completes a reset it should not be
5392 * allowed to start any configuration.
5393 */
5394
5395 if (msgbuf[0] == E1000_VF_RESET) {
5396 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005397 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005398 }
5399
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005400 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005401 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5402 return;
5403 retval = -1;
5404 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005405 }
5406
5407 switch ((msgbuf[0] & 0xFFFF)) {
5408 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005409 retval = -EINVAL;
5410 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5411 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5412 else
5413 dev_warn(&pdev->dev,
5414 "VF %d attempted to override administratively "
5415 "set MAC address\nReload the VF driver to "
5416 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005417 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005418 case E1000_VF_SET_PROMISC:
5419 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5420 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005421 case E1000_VF_SET_MULTICAST:
5422 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5423 break;
5424 case E1000_VF_SET_LPE:
5425 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5426 break;
5427 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005428 retval = -1;
5429 if (vf_data->pf_vlan)
5430 dev_warn(&pdev->dev,
5431 "VF %d attempted to override administratively "
5432 "set VLAN tag\nReload the VF driver to "
5433 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005434 else
5435 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005436 break;
5437 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005438 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005439 retval = -1;
5440 break;
5441 }
5442
Alexander Duyckfef45f42009-12-11 22:57:34 -08005443 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5444out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005445 /* notify the VF of the results of what it sent us */
5446 if (retval)
5447 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5448 else
5449 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5450
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005451 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005452}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005453
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005454static void igb_msg_task(struct igb_adapter *adapter)
5455{
5456 struct e1000_hw *hw = &adapter->hw;
5457 u32 vf;
5458
5459 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5460 /* process any reset requests */
5461 if (!igb_check_for_rst(hw, vf))
5462 igb_vf_reset_event(adapter, vf);
5463
5464 /* process any messages pending */
5465 if (!igb_check_for_msg(hw, vf))
5466 igb_rcv_msg_from_vf(adapter, vf);
5467
5468 /* process any acks */
5469 if (!igb_check_for_ack(hw, vf))
5470 igb_rcv_ack_from_vf(adapter, vf);
5471 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005472}
5473
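/*
 * Summary of the mailbox traffic polled above: a pending reset request
 * re-initializes the VF via igb_vf_reset_event(), a pending message is
 * dispatched by type in igb_rcv_msg_from_vf() (MAC, promisc, multicast,
 * LPE and VLAN requests), and an ack from a VF that is not clear-to-send
 * is answered with a NACK by igb_rcv_ack_from_vf().
 */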
Auke Kok9d5c8242008-01-24 02:22:38 -08005474/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005475 * igb_set_uta - Set unicast filter table address
5476 * @adapter: board private structure
5477 *
5478 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware, it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
Alexander Duyck68d480c2009-10-05 06:33:08 +00005483 **/
5484static void igb_set_uta(struct igb_adapter *adapter)
5485{
5486 struct e1000_hw *hw = &adapter->hw;
5487 int i;
5488
5489 /* The UTA table only exists on 82576 hardware and newer */
5490 if (hw->mac.type < e1000_82576)
5491 return;
5492
5493 /* we only need to do this if VMDq is enabled */
5494 if (!adapter->vfs_allocated_count)
5495 return;
5496
5497 for (i = 0; i < hw->mac.uta_reg_count; i++)
5498 array_wr32(E1000_UTA, i, ~0);
5499}
5500
5501/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005502 * igb_intr_msi - Interrupt Handler
5503 * @irq: interrupt number
5504 * @data: pointer to a network interface device structure
5505 **/
5506static irqreturn_t igb_intr_msi(int irq, void *data)
5507{
Alexander Duyck047e0032009-10-27 15:49:27 +00005508 struct igb_adapter *adapter = data;
5509 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005510 struct e1000_hw *hw = &adapter->hw;
5511 /* read ICR disables interrupts using IAM */
5512 u32 icr = rd32(E1000_ICR);
5513
Alexander Duyck047e0032009-10-27 15:49:27 +00005514 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005515
Alexander Duyck7f081d42010-01-07 17:41:00 +00005516 if (icr & E1000_ICR_DRSTA)
5517 schedule_work(&adapter->reset_task);
5518
Alexander Duyck047e0032009-10-27 15:49:27 +00005519 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005520 /* HW is reporting DMA is out of sync */
5521 adapter->stats.doosync++;
5522 }
5523
Auke Kok9d5c8242008-01-24 02:22:38 -08005524 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5525 hw->mac.get_link_status = 1;
5526 if (!test_bit(__IGB_DOWN, &adapter->state))
5527 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5528 }
5529
Alexander Duyck047e0032009-10-27 15:49:27 +00005530 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005531
5532 return IRQ_HANDLED;
5533}
5534
5535/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005536 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005537 * @irq: interrupt number
5538 * @data: pointer to a network interface device structure
5539 **/
5540static irqreturn_t igb_intr(int irq, void *data)
5541{
Alexander Duyck047e0032009-10-27 15:49:27 +00005542 struct igb_adapter *adapter = data;
5543 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005544 struct e1000_hw *hw = &adapter->hw;
5545 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5546 * need for the IMC write */
5547 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005548
	/* IMS will not auto-mask if INT_ASSERTED is not set; if INT_ASSERTED
	 * is not set, then the adapter didn't send this interrupt */
5551 if (!(icr & E1000_ICR_INT_ASSERTED))
5552 return IRQ_NONE;
5553
Alexander Duyck0ba82992011-08-26 07:45:47 +00005554 igb_write_itr(q_vector);
5555
Alexander Duyck7f081d42010-01-07 17:41:00 +00005556 if (icr & E1000_ICR_DRSTA)
5557 schedule_work(&adapter->reset_task);
5558
Alexander Duyck047e0032009-10-27 15:49:27 +00005559 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005560 /* HW is reporting DMA is out of sync */
5561 adapter->stats.doosync++;
5562 }
5563
Auke Kok9d5c8242008-01-24 02:22:38 -08005564 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5565 hw->mac.get_link_status = 1;
5566 /* guard against interrupt when we're going down */
5567 if (!test_bit(__IGB_DOWN, &adapter->state))
5568 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5569 }
5570
Alexander Duyck047e0032009-10-27 15:49:27 +00005571 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005572
5573 return IRQ_HANDLED;
5574}
5575
Alexander Duyck0ba82992011-08-26 07:45:47 +00005576void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005577{
Alexander Duyck047e0032009-10-27 15:49:27 +00005578 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005579 struct e1000_hw *hw = &adapter->hw;
5580
Alexander Duyck0ba82992011-08-26 07:45:47 +00005581 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5582 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5583 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5584 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005585 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005586 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005587 }
5588
5589 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5590 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005591 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005592 else
5593 igb_irq_enable(adapter);
5594 }
5595}
5596
Auke Kok9d5c8242008-01-24 02:22:38 -08005597/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005598 * igb_poll - NAPI Rx polling callback
5599 * @napi: napi polling structure
5600 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005601 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005602static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005603{
Alexander Duyck047e0032009-10-27 15:49:27 +00005604 struct igb_q_vector *q_vector = container_of(napi,
5605 struct igb_q_vector,
5606 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005607 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005608
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005609#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005610 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5611 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005612#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005613 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005614 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005615
Alexander Duyck0ba82992011-08-26 07:45:47 +00005616 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005617 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005618
Alexander Duyck16eb8812011-08-26 07:43:54 +00005619 /* If all work not completed, return budget and keep polling */
5620 if (!clean_complete)
5621 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005622
	/* all work completed, so exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005624 napi_complete(napi);
5625 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005626
Alexander Duyck16eb8812011-08-26 07:43:54 +00005627 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005628}
Al Viro6d8126f2008-03-16 22:23:24 +00005629
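/*
 * Illustrative sketch (not built): the NAPI contract that igb_poll()
 * above follows -- return the full budget while work remains so the core
 * keeps polling, or call napi_complete() and return less than budget
 * (0 here) to drop back to interrupt mode.
 */
#if 0
static int example_poll_shape(struct napi_struct *napi, int budget)
{
	bool clean_complete = true;	/* stands in for the clean routines */

	if (!clean_complete)
		return budget;		/* keep polling */

	napi_complete(napi);		/* then re-enable interrupts */
	return 0;
}
#endif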
Auke Kok9d5c8242008-01-24 02:22:38 -08005630/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005631 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005632 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005633 * @shhwtstamps: timestamp structure to update
5634 * @regval: unsigned 64bit system time value.
5635 *
5636 * We need to convert the system time value stored in the RX/TXSTMP registers
5637 * into a hwtstamp which can be used by the upper level timestamping functions
5638 */
5639static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5640 struct skb_shared_hwtstamps *shhwtstamps,
5641 u64 regval)
5642{
5643 u64 ns;
5644
Alexander Duyck55cac242009-11-19 12:42:21 +00005645 /*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up by
	 * 24 to match the clock shift we set up earlier.
5648 */
Alexander Duyck06218a82011-08-26 07:46:55 +00005649 if (adapter->hw.mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00005650 regval <<= IGB_82580_TSYNC_SHIFT;
5651
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005652 ns = timecounter_cyc2time(&adapter->clock, regval);
5653 timecompare_update(&adapter->compare, ns);
5654 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5655 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5656 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5657}
5658
5659/**
5660 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5661 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_tx_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
5667 */
Alexander Duyck06034642011-08-26 07:44:22 +00005668static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5669 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005670{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005671 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005672 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005673 struct skb_shared_hwtstamps shhwtstamps;
5674 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005675
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005676 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005677 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005678 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5679 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005680
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005681 regval = rd32(E1000_TXSTMPL);
5682 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5683
5684 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005685 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005686}
5687
5688/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005689 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005690 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005691 * returns true if ring is completely cleaned
5692 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005693static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005694{
Alexander Duyck047e0032009-10-27 15:49:27 +00005695 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005696 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005697 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005698 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005699 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005700 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005701 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005702
Alexander Duyck13fde972011-10-05 13:35:24 +00005703 if (test_bit(__IGB_DOWN, &adapter->state))
5704 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005705
Alexander Duyck06034642011-08-26 07:44:22 +00005706 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005707 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005708 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005709
Alexander Duyck13fde972011-10-05 13:35:24 +00005710 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005711 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005712
Alexander Duyck8542db02011-08-26 07:44:43 +00005713 /* prevent any other reads prior to eop_desc */
5714 rmb();
5715
5716 /* if next_to_watch is not set then there is no work pending */
5717 if (!eop_desc)
5718 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005719
5720 /* if DD is not set pending work has not been completed */
5721 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5722 break;
5723
Alexander Duyck8542db02011-08-26 07:44:43 +00005724 /* clear next_to_watch to prevent false hangs */
5725 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005726
Alexander Duyckebe42d12011-08-26 07:45:09 +00005727 /* update the statistics for this packet */
5728 total_bytes += tx_buffer->bytecount;
5729 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005730
Alexander Duyckebe42d12011-08-26 07:45:09 +00005731 /* retrieve hardware timestamp */
5732 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005733
Alexander Duyckebe42d12011-08-26 07:45:09 +00005734 /* free the skb */
5735 dev_kfree_skb_any(tx_buffer->skb);
5736 tx_buffer->skb = NULL;
5737
5738 /* unmap skb header data */
5739 dma_unmap_single(tx_ring->dev,
5740 tx_buffer->dma,
5741 tx_buffer->length,
5742 DMA_TO_DEVICE);
5743
5744 /* clear last DMA location and unmap remaining buffers */
5745 while (tx_desc != eop_desc) {
5746 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005747
Alexander Duyck13fde972011-10-05 13:35:24 +00005748 tx_buffer++;
5749 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005750 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005751 if (unlikely(!i)) {
5752 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005753 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005754 tx_desc = IGB_TX_DESC(tx_ring, 0);
5755 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005756
5757 /* unmap any remaining paged data */
5758 if (tx_buffer->dma) {
5759 dma_unmap_page(tx_ring->dev,
5760 tx_buffer->dma,
5761 tx_buffer->length,
5762 DMA_TO_DEVICE);
5763 }
5764 }
5765
5766 /* clear last DMA location */
5767 tx_buffer->dma = 0;
5768
5769 /* move us one more past the eop_desc for start of next pkt */
5770 tx_buffer++;
5771 tx_desc++;
5772 i++;
5773 if (unlikely(!i)) {
5774 i -= tx_ring->count;
5775 tx_buffer = tx_ring->tx_buffer_info;
5776 tx_desc = IGB_TX_DESC(tx_ring, 0);
5777 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005778 }
5779
Alexander Duyck8542db02011-08-26 07:44:43 +00005780 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005781 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005782 u64_stats_update_begin(&tx_ring->tx_syncp);
5783 tx_ring->tx_stats.bytes += total_bytes;
5784 tx_ring->tx_stats.packets += total_packets;
5785 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005786 q_vector->tx.total_bytes += total_bytes;
5787 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005788
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005789 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005790 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005791
Alexander Duyck8542db02011-08-26 07:44:43 +00005792 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005793
		/* Detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005796 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyck8542db02011-08-26 07:44:43 +00005797 if (eop_desc &&
5798 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005799 (adapter->tx_timeout_factor * HZ)) &&
5800 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005801
Auke Kok9d5c8242008-01-24 02:22:38 -08005802 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005803 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005804 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005805 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005806 " TDH <%x>\n"
5807 " TDT <%x>\n"
5808 " next_to_use <%x>\n"
5809 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005810 "buffer_info[next_to_clean]\n"
5811 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005812 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005813 " jiffies <%lx>\n"
5814 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005815 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005816 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005817 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005818 tx_ring->next_to_use,
5819 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005820 tx_buffer->time_stamp,
5821 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005822 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005823 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005824 netif_stop_subqueue(tx_ring->netdev,
5825 tx_ring->queue_index);
5826
5827 /* we are about to reset, no point in enabling stuff */
5828 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005829 }
5830 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005831
5832 if (unlikely(total_packets &&
5833 netif_carrier_ok(tx_ring->netdev) &&
5834 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5835 /* Make sure that anybody stopping the queue after this
5836 * sees the new next_to_clean.
5837 */
5838 smp_mb();
5839 if (__netif_subqueue_stopped(tx_ring->netdev,
5840 tx_ring->queue_index) &&
5841 !(test_bit(__IGB_DOWN, &adapter->state))) {
5842 netif_wake_subqueue(tx_ring->netdev,
5843 tx_ring->queue_index);
5844
5845 u64_stats_update_begin(&tx_ring->tx_syncp);
5846 tx_ring->tx_stats.restart_queue++;
5847 u64_stats_update_end(&tx_ring->tx_syncp);
5848 }
5849 }
5850
5851 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005852}
5853
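/*
 * Illustrative sketch (not built): the ring walk in igb_clean_tx_irq()
 * above biases the index by -count so the wrap test becomes a cheap
 * "if (!i)" instead of a compare against tx_ring->count on every step.
 * The helper below replays the trick for a single advance.
 */
#if 0
static u16 example_next_desc(s32 i, u16 count)
{
	i -= count;		/* bias the index into [-count, 0) */
	i++;			/* advance one descriptor */
	if (!i)
		i -= count;	/* wrapped: back to the start of the ring */
	return i + count;	/* remove the bias again */
}
#endif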
Alexander Duyckcd392f52011-08-26 07:43:59 +00005854static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005855 union e1000_adv_rx_desc *rx_desc,
5856 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005857{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005858 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005859
Alexander Duyck294e7d72011-08-26 07:45:57 +00005860 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005861 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00005862 return;
5863
5864 /* Rx checksum disabled via ethtool */
5865 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005866 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005867
Auke Kok9d5c8242008-01-24 02:22:38 -08005868 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005869 if (igb_test_staterr(rx_desc,
5870 E1000_RXDEXT_STATERR_TCPE |
5871 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005872 /*
		 * work around errata with SCTP packets where the TCPE (aka
		 * L4E) bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets; let the stack check the crc32c instead
5876 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005877 if (!((skb->len == 60) &&
5878 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005879 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005880 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005881 u64_stats_update_end(&ring->rx_syncp);
5882 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005883 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005884 return;
5885 }
5886 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005887 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5888 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08005889 skb->ip_summed = CHECKSUM_UNNECESSARY;
5890
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005891 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5892 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08005893}
5894
Alexander Duyck077887c2011-08-26 07:46:29 +00005895static inline void igb_rx_hash(struct igb_ring *ring,
5896 union e1000_adv_rx_desc *rx_desc,
5897 struct sk_buff *skb)
5898{
5899 if (ring->netdev->features & NETIF_F_RXHASH)
5900 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5901}
5902
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005903static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5904 union e1000_adv_rx_desc *rx_desc,
5905 struct sk_buff *skb)
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005906{
5907 struct igb_adapter *adapter = q_vector->adapter;
5908 struct e1000_hw *hw = &adapter->hw;
5909 u64 regval;
5910
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005911 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
5912 E1000_RXDADV_STAT_TS))
5913 return;
5914
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005915 /*
5916 * If this bit is set, then the RX registers contain the time stamp. No
5917 * other packet will be time stamped until we read these registers, so
5918 * read the registers to make them available again. Because only one
5919 * packet can be time stamped at a time, we know that the register
5920 * values must belong to this one here and therefore we don't need to
5921 * compare any of the additional attributes stored for it.
5922 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005923 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005924 * can turn into a skb_shared_hwtstamps.
5925 */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005926 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
Nick Nunley757b77e2010-03-26 11:36:47 +00005927 u32 *stamp = (u32 *)skb->data;
5928 regval = le32_to_cpu(*(stamp + 2));
5929 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5930 skb_pull(skb, IGB_TS_HDR_LEN);
5931 } else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5933 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005934
Nick Nunley757b77e2010-03-26 11:36:47 +00005935 regval = rd32(E1000_RXSTMPL);
5936 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5937 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005938
5939 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5940}
Alexander Duyck8be10e92011-08-26 07:47:11 +00005941
5942static void igb_rx_vlan(struct igb_ring *ring,
5943 union e1000_adv_rx_desc *rx_desc,
5944 struct sk_buff *skb)
5945{
5946 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5947 u16 vid;
5948 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5949 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5950 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5951 else
5952 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5953
5954 __vlan_hwaccel_put_tag(skb, vid);
5955 }
5956}
5957
Alexander Duyck44390ca2011-08-26 07:43:38 +00005958static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005959{
5960 /* HW will not DMA in data larger than the given buffer, even if it
5961 * parses the (NFS, of course) header to be larger. In that case, it
5962 * fills the header buffer and spills the rest into the page.
5963 */
5964 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5965 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00005966 if (hlen > IGB_RX_HDR_LEN)
5967 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005968 return hlen;
5969}
5970
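/*
 * Illustrative sketch (not built): the arithmetic behind igb_get_hlen()
 * above.  The advanced descriptor packs the header length the hardware
 * DMA'd into lo_dword.hdr_info, and the driver clamps it to the header
 * buffer it actually posted (IGB_RX_HDR_LEN).  The helper name is
 * hypothetical.
 */
#if 0
static u16 example_hdr_len(u16 hdr_info)
{
	u16 hlen = (hdr_info & E1000_RXDADV_HDRBUFLEN_MASK) >>
		   E1000_RXDADV_HDRBUFLEN_SHIFT;

	return hlen > IGB_RX_HDR_LEN ? IGB_RX_HDR_LEN : hlen;
}
#endif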
Alexander Duyckcd392f52011-08-26 07:43:59 +00005971static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005972{
Alexander Duyck0ba82992011-08-26 07:45:47 +00005973 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005974 union e1000_adv_rx_desc *rx_desc;
5975 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005976 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005977 u16 cleaned_count = igb_desc_unused(rx_ring);
5978 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005979
Alexander Duyck60136902011-08-26 07:44:05 +00005980 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005981
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005982 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
Alexander Duyck06034642011-08-26 07:44:22 +00005983 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00005984 struct sk_buff *skb = buffer_info->skb;
5985 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005986
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005987 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005988 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005989
5990 i++;
5991 if (i == rx_ring->count)
5992 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005993
Alexander Duyck60136902011-08-26 07:44:05 +00005994 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005995 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005996
Alexander Duyck16eb8812011-08-26 07:43:54 +00005997 /*
5998 * This memory barrier is needed to keep us from reading
5999 * any other fields out of the rx_desc until we know the
6000 * RXD_STAT_DD bit is set
6001 */
6002 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006003
Alexander Duyck16eb8812011-08-26 07:43:54 +00006004 if (!skb_is_nonlinear(skb)) {
6005 __skb_put(skb, igb_get_hlen(rx_desc));
6006 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00006007 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00006008 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00006009 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006010 }
6011
Alexander Duyck16eb8812011-08-26 07:43:54 +00006012 if (rx_desc->wb.upper.length) {
6013 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006014
Koki Sanagiaa913402010-04-27 01:01:19 +00006015 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006016 buffer_info->page,
6017 buffer_info->page_offset,
6018 length);
6019
Alexander Duyck16eb8812011-08-26 07:43:54 +00006020 skb->len += length;
6021 skb->data_len += length;
Eric Dumazet95b9c1d2011-10-13 07:56:41 +00006022 skb->truesize += PAGE_SIZE / 2;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006023
Alexander Duyckd1eff352009-11-12 18:38:35 +00006024 if ((page_count(buffer_info->page) != 1) ||
6025 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006026 buffer_info->page = NULL;
6027 else
6028 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08006029
Alexander Duyck16eb8812011-08-26 07:43:54 +00006030 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
6031 PAGE_SIZE / 2, DMA_FROM_DEVICE);
6032 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006033 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006034
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006035 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006036 struct igb_rx_buffer *next_buffer;
6037 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08006038 buffer_info->skb = next_buffer->skb;
6039 buffer_info->dma = next_buffer->dma;
6040 next_buffer->skb = skb;
6041 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006042 goto next_desc;
6043 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00006044
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006045 if (igb_test_staterr(rx_desc,
6046 E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00006047 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006048 goto next_desc;
6049 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006050
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006051 igb_rx_hwtstamp(q_vector, rx_desc, skb);
Alexander Duyck077887c2011-08-26 07:46:29 +00006052 igb_rx_hash(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006053 igb_rx_checksum(rx_ring, rx_desc, skb);
Alexander Duyck8be10e92011-08-26 07:47:11 +00006054 igb_rx_vlan(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006055
6056 total_bytes += skb->len;
6057 total_packets++;
6058
6059 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6060
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006061 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006062
Alexander Duyck16eb8812011-08-26 07:43:54 +00006063 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08006064next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00006065 if (!budget)
6066 break;
6067
6068 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006069 /* return some buffers to hardware, one at a time is too slow */
6070 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00006071 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006072 cleaned_count = 0;
6073 }
6074
6075 /* use prefetched values */
6076 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006077 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006078
Auke Kok9d5c8242008-01-24 02:22:38 -08006079 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006080 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006081 rx_ring->rx_stats.packets += total_packets;
6082 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006083 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006084 q_vector->rx.total_packets += total_packets;
6085 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006086
6087 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006088 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006089
Alexander Duyck16eb8812011-08-26 07:43:54 +00006090 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006091}
6092
Alexander Duyckc023cd82011-08-26 07:43:43 +00006093static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006094 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006095{
6096 struct sk_buff *skb = bi->skb;
6097 dma_addr_t dma = bi->dma;
6098
6099 if (dma)
6100 return true;
6101
6102 if (likely(!skb)) {
6103 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6104 IGB_RX_HDR_LEN);
6105 bi->skb = skb;
6106 if (!skb) {
6107 rx_ring->rx_stats.alloc_failed++;
6108 return false;
6109 }
6110
6111 /* initialize skb for ring */
6112 skb_record_rx_queue(skb, rx_ring->queue_index);
6113 }
6114
6115 dma = dma_map_single(rx_ring->dev, skb->data,
6116 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6117
6118 if (dma_mapping_error(rx_ring->dev, dma)) {
6119 rx_ring->rx_stats.alloc_failed++;
6120 return false;
6121 }
6122
6123 bi->dma = dma;
6124 return true;
6125}
6126
6127static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006128 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006129{
6130 struct page *page = bi->page;
6131 dma_addr_t page_dma = bi->page_dma;
6132 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6133
6134 if (page_dma)
6135 return true;
6136
6137 if (!page) {
Eric Dumazet1f2149c2011-11-22 10:57:41 +00006138 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006139 bi->page = page;
6140 if (unlikely(!page)) {
6141 rx_ring->rx_stats.alloc_failed++;
6142 return false;
6143 }
6144 }
6145
6146 page_dma = dma_map_page(rx_ring->dev, page,
6147 page_offset, PAGE_SIZE / 2,
6148 DMA_FROM_DEVICE);
6149
6150 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6151 rx_ring->rx_stats.alloc_failed++;
6152 return false;
6153 }
6154
6155 bi->page_dma = page_dma;
6156 bi->page_offset = page_offset;
6157 return true;
6158}
6159
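/*
 * Illustrative sketch (not built): igb_alloc_mapped_page() above XORs
 * page_offset with PAGE_SIZE/2, so successive refills alternate between
 * the two halves of one page -- one half can sit in an skb while the
 * other is mapped for receive.  The helper name is hypothetical.
 */
#if 0
static unsigned int example_flip_page_half(unsigned int page_offset)
{
	return page_offset ^ (PAGE_SIZE / 2);	/* 0 <-> PAGE_SIZE/2 */
}
#endif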
Auke Kok9d5c8242008-01-24 02:22:38 -08006160/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006161 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring to refill
 * @cleaned_count: number of buffers to replace
6163 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006164void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006165{
Auke Kok9d5c8242008-01-24 02:22:38 -08006166 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006167 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006168 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006169
Alexander Duyck60136902011-08-26 07:44:05 +00006170 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006171 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006172 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006173
6174 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006175 if (!igb_alloc_mapped_skb(rx_ring, bi))
6176 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006177
Alexander Duyckc023cd82011-08-26 07:43:43 +00006178 /* Refresh the desc even if buffer_addrs didn't change
6179 * because each write-back erases this info. */
6180 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006181
Alexander Duyckc023cd82011-08-26 07:43:43 +00006182 if (!igb_alloc_mapped_page(rx_ring, bi))
6183 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006184
Alexander Duyckc023cd82011-08-26 07:43:43 +00006185 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006186
Alexander Duyckc023cd82011-08-26 07:43:43 +00006187 rx_desc++;
6188 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006189 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006190 if (unlikely(!i)) {
Alexander Duyck60136902011-08-26 07:44:05 +00006191 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006192 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006193 i -= rx_ring->count;
6194 }
6195
6196 /* clear the hdr_addr for the next_to_use descriptor */
6197 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006198 }
6199
Alexander Duyckc023cd82011-08-26 07:43:43 +00006200 i += rx_ring->count;
6201
Auke Kok9d5c8242008-01-24 02:22:38 -08006202 if (rx_ring->next_to_use != i) {
6203 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006204
6205 /* Force memory writes to complete before letting h/w
6206 * know there are new descriptors to fetch. (Only
6207 * applicable for weak-ordered memory model archs,
6208 * such as IA-64). */
6209 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006210 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006211 }
6212}
6213
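/*
 * Illustrative sketch (not built): the producer-side protocol used by
 * igb_alloc_rx_buffers() above -- publish the descriptor writes with a
 * write barrier before bumping the tail register, so the hardware can
 * never fetch a half-written descriptor.  The helper name is
 * hypothetical.
 */
#if 0
static void example_publish_tail(struct igb_ring *rx_ring, u16 i)
{
	rx_ring->next_to_use = i;
	wmb();				/* descriptors before tail bump */
	writel(i, rx_ring->tail);
}
#endif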
6214/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
6219 **/
6220static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6221{
6222 struct igb_adapter *adapter = netdev_priv(netdev);
6223 struct mii_ioctl_data *data = if_mii(ifr);
6224
6225 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6226 return -EOPNOTSUPP;
6227
6228 switch (cmd) {
6229 case SIOCGMIIPHY:
6230 data->phy_id = adapter->hw.phy.addr;
6231 break;
6232 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006233 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6234 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006235 return -EIO;
6236 break;
6237 case SIOCSMIIREG:
6238 default:
6239 return -EOPNOTSUPP;
6240 }
6241 return 0;
6242}
6243
6244/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006245 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
6249 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006250 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
6252 * when no packet needs it. At most one packet in the queue may be
6253 * marked for time stamping, otherwise it would be impossible to tell
6254 * for sure to which packet the hardware time stamp belongs.
6255 *
6256 * Incoming time stamping has to be configured via the hardware
6257 * filters. Not all combinations are supported, in particular event
6258 * type has to be specified. Matching the kind of event packet is
6259 * not supported, with the exception of "all V2 events regardless of
6260 * level 2 or 4".
6261 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006262 **/
6263static int igb_hwtstamp_ioctl(struct net_device *netdev,
6264 struct ifreq *ifr, int cmd)
6265{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006266 struct igb_adapter *adapter = netdev_priv(netdev);
6267 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006268 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006269 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6270 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006271 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006272 bool is_l4 = false;
6273 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006274 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006275
6276 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6277 return -EFAULT;
6278
6279 /* reserved for future extensions */
6280 if (config.flags)
6281 return -EINVAL;
6282
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006283 switch (config.tx_type) {
6284 case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006286 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006287 break;
6288 default:
6289 return -ERANGE;
6290 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006291
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006292 switch (config.rx_filter) {
6293 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006294 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006295 break;
6296 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6297 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6298 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6299 case HWTSTAMP_FILTER_ALL:
6300 /*
		 * register TSYNCRXCFG must be set; therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages,
		 * so fall back to time stamping all packets
6304 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006305 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006306 config.rx_filter = HWTSTAMP_FILTER_ALL;
6307 break;
6308 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006309 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006310 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006311 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006312 break;
6313 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006314 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006315 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006316 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006317 break;
6318 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6319 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006320 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006321 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006322 is_l2 = true;
6323 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006324 config.rx_filter = HWTSTAMP_FILTER_SOME;
6325 break;
6326 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6327 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006328 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006329 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006330 is_l2 = true;
6331 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006332 config.rx_filter = HWTSTAMP_FILTER_SOME;
6333 break;
6334 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6335 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6336 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006337 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006338 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006339 is_l2 = true;
Jacob Keller11ba69e2011-10-12 00:51:54 +00006340 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006341 break;
6342 default:
6343 return -ERANGE;
6344 }
6345
	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
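
/*
 * Illustrative user-space sketch (not part of this driver): requesting
 * hardware time stamping over the SIOCSHWTSTAMP path handled above. The
 * socket and the interface name "eth0" are assumptions for the example.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int request_hw_tstamp(int sock)
{
	struct ifreq ifr;
	struct hwtstamp_config cfg;

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
	ifr.ifr_data = (void *)&cfg;

	/* the driver may rewrite cfg.rx_filter, e.g. to HWTSTAMP_FILTER_ALL */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif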

/**
 * igb_ioctl - handle ioctl calls not covered by other driver entry points
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the ioctl data
 * @cmd: ioctl command to execute
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

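/*
 * Illustrative sketch (not part of the driver): the helpers above take an
 * offset relative to the PCIe capability base, so a caller can read a
 * standard PCIe register such as Link Status like this. The surrounding
 * hw/adapter context is assumed for the example.
 */
#if 0
	u16 link_status;

	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_LNKSTA, &link_status))
		dev_info(&adapter->pdev->dev, "negotiated link width: x%d\n",
			 (link_status & PCI_EXP_LNKSTA_NLW) >> 4);
#endif
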
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

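/*
 * The two hooks above are reached through the net_device_ops VLAN
 * callbacks, e.g. when user space adds a VLAN sub-interface with an
 * illustrative command such as:
 *	ip link add link eth0 name eth0.100 type vlan id 100
 */
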
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

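/*
 * Note on the spd + dplx encoding above: the SPEED_* values are all even
 * (10/100/1000) and DUPLEX_HALF/DUPLEX_FULL are 0/1, so the sum identifies
 * each speed/duplex pair uniquely. A hedged sketch of forcing 100 Mbps
 * full duplex, roughly as an ethtool set_settings handler might do:
 */
#if 0
	err = igb_set_spd_dplx(adapter, SPEED_100, DUPLEX_FULL);
	if (err)
		return err;	/* e.g. rejected on an internal SerDes port */
#endif
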
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->msix_entries)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

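/*
 * Worked example of the packing above (illustrative MAC address): for
 * 00:11:22:33:44:55, rar_low becomes 0x33221100 and rar_high becomes
 * 0x00005544 before E1000_RAH_AV and the pool bits are OR'ed in.
 */
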
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

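/*
 * Illustrative user-space trigger for the callback above (hypothetical
 * interface name and address):
 *	ip link set eth0 vf 0 mac 02:00:00:00:00:01
 */
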
static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			       E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

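/*
 * Worked example of the rate-factor math above (illustrative numbers,
 * assuming E1000_RTTBCNRC_RF_INT_SHIFT is 14): with link_speed = 1000 and
 * tx_rate = 300, rf_int = 3 and rf_dec = (1000 - 900) * 16384 / 300 = 5461,
 * i.e. a fixed-point rate factor of ~3.333 = link_speed / tx_rate. The
 * limit itself is typically requested from user space with something like
 * "ip link set <pf> vf 0 rate 300".
 */
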
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
						adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/*
			 * DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/*
			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = 1000 usec, in 32 usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/*
			 * no lower threshold to disable coalescing
			 * (smart FIFO) - UTRESH = 0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/*
			 * free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/*
			 * make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->flags & IGB_FLAG_DMAC */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}
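
/*
 * Worked example of the watermark math above (illustrative numbers): with
 * pba = 34 (KB) and a 1522-byte max frame, hwm = 64 * 34 - 1522 / 16 = 2081
 * sixteen-byte units. With a 9234-byte jumbo frame the raw value would be
 * 2176 - 577 = 1599, below the 64 * (34 - 6) = 1792 floor, so hwm is raised
 * to 1792 (i.e. PBA - 6KB).
 */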

/* igb_main.c */