/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 0
#define BUILD 6
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
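/*
 * Note: __stringify() expands its macro argument before stringizing, so
 * with MAJ=3, MIN=0 and BUILD=6 the concatenation above produces the single
 * string literal "3.0.6-k".
 */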
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
        "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
    [board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
    /* required last entry */
    {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
    .notifier_call = igb_notify_dca,
    .next          = NULL,
    .priority      = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                                              pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
    .error_detected = igb_io_error_detected,
    .slot_reset = igb_io_slot_reset,
    .resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
    .name     = igb_driver_name,
    .id_table = igb_pci_tbl,
    .probe    = igb_probe,
    .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
    /* Power Management Hooks */
    .suspend  = igb_suspend,
    .resume   = igb_resume,
#endif
    .shutdown = igb_shutdown,
    .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
    u32 ofs;
    char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

    /* General Registers */
    {E1000_CTRL, "CTRL"},
    {E1000_STATUS, "STATUS"},
    {E1000_CTRL_EXT, "CTRL_EXT"},

    /* Interrupt Registers */
    {E1000_ICR, "ICR"},

    /* RX Registers */
    {E1000_RCTL, "RCTL"},
    {E1000_RDLEN(0), "RDLEN"},
    {E1000_RDH(0), "RDH"},
    {E1000_RDT(0), "RDT"},
    {E1000_RXDCTL(0), "RXDCTL"},
    {E1000_RDBAL(0), "RDBAL"},
    {E1000_RDBAH(0), "RDBAH"},

    /* TX Registers */
    {E1000_TCTL, "TCTL"},
    {E1000_TDBAL(0), "TDBAL"},
    {E1000_TDBAH(0), "TDBAH"},
    {E1000_TDLEN(0), "TDLEN"},
    {E1000_TDH(0), "TDH"},
    {E1000_TDT(0), "TDT"},
    {E1000_TXDCTL(0), "TXDCTL"},
    {E1000_TDFH, "TDFH"},
    {E1000_TDFT, "TDFT"},
    {E1000_TDFHS, "TDFHS"},
    {E1000_TDFPC, "TDFPC"},

    /* List Terminator */
    {}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
    int n = 0;
    char rname[16];
    u32 regs[8];

    switch (reginfo->ofs) {
    case E1000_RDLEN(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_RDLEN(n));
        break;
    case E1000_RDH(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_RDH(n));
        break;
    case E1000_RDT(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_RDT(n));
        break;
    case E1000_RXDCTL(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_RXDCTL(n));
        break;
    case E1000_RDBAL(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_RDBAL(n));
        break;
    case E1000_RDBAH(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_RDBAH(n));
        break;
    case E1000_TDBAL(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_TDBAL(n));
        break;
    case E1000_TDBAH(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_TDBAH(n));
        break;
    case E1000_TDLEN(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_TDLEN(n));
        break;
    case E1000_TDH(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_TDH(n));
        break;
    case E1000_TDT(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_TDT(n));
        break;
    case E1000_TXDCTL(0):
        for (n = 0; n < 4; n++)
            regs[n] = rd32(E1000_TXDCTL(n));
        break;
    default:
        printk(KERN_INFO "%-15s %08x\n",
               reginfo->name, rd32(reginfo->ofs));
        return;
    }

    snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
    printk(KERN_INFO "%-15s ", rname);
    for (n = 0; n < 4; n++)
        printk(KERN_CONT "%08x ", regs[n]);
    printk(KERN_CONT "\n");
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct e1000_hw *hw = &adapter->hw;
    struct igb_reg_info *reginfo;
    struct igb_ring *tx_ring;
    union e1000_adv_tx_desc *tx_desc;
    struct my_u0 { u64 a; u64 b; } *u0;
    struct igb_ring *rx_ring;
    union e1000_adv_rx_desc *rx_desc;
    u32 staterr;
    u16 i, n;

    if (!netif_msg_hw(adapter))
        return;

    /* Print netdevice Info */
    if (netdev) {
        dev_info(&adapter->pdev->dev, "Net device Info\n");
        printk(KERN_INFO "Device Name     state            "
               "trans_start      last_rx\n");
        printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
               netdev->name,
               netdev->state,
               netdev->trans_start,
               netdev->last_rx);
    }

    /* Print Registers */
    dev_info(&adapter->pdev->dev, "Register Dump\n");
    printk(KERN_INFO " Register Name   Value\n");
    for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
         reginfo->name; reginfo++) {
        igb_regdump(hw, reginfo);
    }

    /* Print TX Ring Summary */
    if (!netdev || !netif_running(netdev))
        goto exit;

    dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
    printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
           " leng ntw timestamp\n");
    for (n = 0; n < adapter->num_tx_queues; n++) {
        struct igb_tx_buffer *buffer_info;
        tx_ring = adapter->tx_ring[n];
        buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
        printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
               n, tx_ring->next_to_use, tx_ring->next_to_clean,
               (u64)buffer_info->dma,
               buffer_info->length,
               buffer_info->next_to_watch,
               (u64)buffer_info->time_stamp);
    }

    /* Print TX Rings */
    if (!netif_msg_tx_done(adapter))
        goto rx_ring_summary;

    dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

    /* Transmit Descriptor Formats
     *
     * Advanced Transmit Descriptor
     *   +--------------------------------------------------------------+
     * 0 |         Buffer Address [63:0]                                |
     *   +--------------------------------------------------------------+
     * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
     *   +--------------------------------------------------------------+
     *   63      46 45    40 39 38 36 35 32 31  24             15       0
     */

    for (n = 0; n < adapter->num_tx_queues; n++) {
        tx_ring = adapter->tx_ring[n];
        printk(KERN_INFO "------------------------------------\n");
        printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
        printk(KERN_INFO "------------------------------------\n");
        printk(KERN_INFO "T [desc]     [address 63:0  ] "
               "[PlPOCIStDDM Ln] [bi->dma       ] "
               "leng  ntw timestamp        bi->skb\n");

        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
            struct igb_tx_buffer *buffer_info;
            tx_desc = IGB_TX_DESC(tx_ring, i);
            buffer_info = &tx_ring->tx_buffer_info[i];
            u0 = (struct my_u0 *)tx_desc;
            printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
                   " %04X  %p %016llX %p", i,
                   le64_to_cpu(u0->a),
                   le64_to_cpu(u0->b),
                   (u64)buffer_info->dma,
                   buffer_info->length,
                   buffer_info->next_to_watch,
                   (u64)buffer_info->time_stamp,
                   buffer_info->skb);
            if (i == tx_ring->next_to_use &&
                i == tx_ring->next_to_clean)
                printk(KERN_CONT " NTC/U\n");
            else if (i == tx_ring->next_to_use)
                printk(KERN_CONT " NTU\n");
            else if (i == tx_ring->next_to_clean)
                printk(KERN_CONT " NTC\n");
            else
                printk(KERN_CONT "\n");

            if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
                print_hex_dump(KERN_INFO, "",
                               DUMP_PREFIX_ADDRESS,
                               16, 1, phys_to_virt(buffer_info->dma),
                               buffer_info->length, true);
        }
    }

    /* Print RX Rings Summary */
rx_ring_summary:
    dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
    printk(KERN_INFO "Queue [NTU] [NTC]\n");
    for (n = 0; n < adapter->num_rx_queues; n++) {
        rx_ring = adapter->rx_ring[n];
        printk(KERN_INFO " %5d %5X %5X\n", n,
               rx_ring->next_to_use, rx_ring->next_to_clean);
    }

    /* Print RX Rings */
    if (!netif_msg_rx_status(adapter))
        goto exit;

    dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

    /* Advanced Receive Descriptor (Read) Format
     *    63                                           1        0
     *    +-----------------------------------------------------+
     *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
     *    +----------------------------------------------+------+
     *  8 |       Header Buffer Address [63:1]           |  DD  |
     *    +-----------------------------------------------------+
     *
     *
     * Advanced Receive Descriptor (Write-Back) Format
     *
     *   63       48 47    32 31  30      21 20 17 16   4 3     0
     *   +------------------------------------------------------+
     * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
     *   | Checksum   Ident  |   |           |    | Type | Type |
     *   +------------------------------------------------------+
     * 8 | VLAN Tag | Length | Extended Error | Extended Status |
     *   +------------------------------------------------------+
     *   63       48 47    32 31            20 19               0
     */

    for (n = 0; n < adapter->num_rx_queues; n++) {
        rx_ring = adapter->rx_ring[n];
        printk(KERN_INFO "------------------------------------\n");
        printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
        printk(KERN_INFO "------------------------------------\n");
        printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
               "[  HeadBuf   DD] [bi->dma        ] [bi->skb] "
               "<-- Adv Rx Read format\n");
        printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
               "[vl er S cks ln] ---------------- [bi->skb] "
               "<-- Adv Rx Write-Back format\n");

        for (i = 0; i < rx_ring->count; i++) {
            struct igb_rx_buffer *buffer_info;
            buffer_info = &rx_ring->rx_buffer_info[i];
            rx_desc = IGB_RX_DESC(rx_ring, i);
            u0 = (struct my_u0 *)rx_desc;
            staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
            if (staterr & E1000_RXD_STAT_DD) {
                /* Descriptor Done */
                printk(KERN_INFO "RWB[0x%03X]     %016llX "
                       "%016llX ---------------- %p", i,
                       le64_to_cpu(u0->a),
                       le64_to_cpu(u0->b),
                       buffer_info->skb);
            } else {
                printk(KERN_INFO "R  [0x%03X]     %016llX "
                       "%016llX %016llX %p", i,
                       le64_to_cpu(u0->a),
                       le64_to_cpu(u0->b),
                       (u64)buffer_info->dma,
                       buffer_info->skb);

                if (netif_msg_pktdata(adapter)) {
                    print_hex_dump(KERN_INFO, "",
                                   DUMP_PREFIX_ADDRESS,
                                   16, 1,
                                   phys_to_virt(buffer_info->dma),
                                   IGB_RX_HDR_LEN, true);
                    print_hex_dump(KERN_INFO, "",
                                   DUMP_PREFIX_ADDRESS,
                                   16, 1,
                                   phys_to_virt(
                                     buffer_info->page_dma +
                                     buffer_info->page_offset),
                                   PAGE_SIZE/2, true);
                }
            }

            if (i == rx_ring->next_to_use)
                printk(KERN_CONT " NTU\n");
            else if (i == rx_ring->next_to_clean)
                printk(KERN_CONT " NTC\n");
            else
                printk(KERN_CONT "\n");
        }
    }

exit:
    return;
}

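/*
 * igb_dump() is gated on the adapter's message level: netif_msg_hw() must
 * be set for any output at all, netif_msg_tx_done() and netif_msg_rx_status()
 * gate the per-ring dumps, and netif_msg_pktdata() additionally hex-dumps
 * buffer contents.  These bits can typically be toggled from userspace via
 * ethtool, e.g. "ethtool -s eth0 msglvl 0x3800" (assuming the standard
 * NETIF_MSG_* bit values for rx_status, pktdata and hw).
 */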
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
    struct igb_adapter *adapter =
        container_of(tc, struct igb_adapter, cycles);
    struct e1000_hw *hw = &adapter->hw;
    u64 stamp = 0;
    int shift = 0;

    /*
     * The timestamp latches on lowest register read. For the 82580
     * the lowest register is SYSTIMR instead of SYSTIML.  However we never
     * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
     */
    if (hw->mac.type == e1000_82580) {
        stamp = rd32(E1000_SYSTIMR) >> 8;
        shift = IGB_82580_TSYNC_SHIFT;
    }

    stamp |= (u64)rd32(E1000_SYSTIML) << shift;
    stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
    return stamp;
}

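/*
 * A rough sketch of how this callback is consumed (the actual wiring lives
 * in igb_init_hw_timer(), declared above but outside this excerpt): the
 * driver fills in a struct cyclecounter whose .read is igb_read_clock and
 * feeds it to a struct timecounter, so the timestamping code can turn raw
 * SYSTIM cycle counts into nanoseconds, along the lines of
 *
 *     adapter->cycles.read = igb_read_clock;
 *     adapter->cycles.mask = CLOCKSOURCE_MASK(64);
 *     timecounter_init(&adapter->clock, &adapter->cycles,
 *                      ktime_to_ns(ktime_get_real()));
 *
 * Only the "cycles" field name is confirmed by the container_of() above;
 * the rest of the field names here are illustrative assumptions.
 */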
/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
    struct igb_adapter *adapter = hw->back;
    return adapter->netdev;
}

Auke Kok9d5c8242008-01-24 02:22:38 -0800587 * igb_init_module - Driver Registration Routine
588 *
589 * igb_init_module is the first routine called when the driver is
590 * loaded. All it does is register with the PCI subsystem.
591 **/
592static int __init igb_init_module(void)
593{
594 int ret;
595 printk(KERN_INFO "%s - version %s\n",
596 igb_driver_string, igb_driver_version);
597
598 printk(KERN_INFO "%s\n", igb_copyright);
599
Jeff Kirsher421e02f2008-10-17 11:08:31 -0700600#ifdef CONFIG_IGB_DCA
Jeb Cramerfe4506b2008-07-08 15:07:55 -0700601 dca_register_notify(&dca_notifier);
602#endif
Alexander Duyckbbd98fe2009-01-31 00:52:30 -0800603 ret = pci_register_driver(&igb_driver);
Auke Kok9d5c8242008-01-24 02:22:38 -0800604 return ret;
605}
606
607module_init(igb_init_module);
608
609/**
610 * igb_exit_module - Driver Exit Cleanup Routine
611 *
612 * igb_exit_module is called just before the driver is removed
613 * from memory.
614 **/
615static void __exit igb_exit_module(void)
616{
Jeff Kirsher421e02f2008-10-17 11:08:31 -0700617#ifdef CONFIG_IGB_DCA
Jeb Cramerfe4506b2008-07-08 15:07:55 -0700618 dca_unregister_notify(&dca_notifier);
619#endif
Auke Kok9d5c8242008-01-24 02:22:38 -0800620 pci_unregister_driver(&igb_driver);
621}
622
623module_exit(igb_exit_module);
624
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
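/*
 * Q_IDX_82576(i) interleaves the first eight queues with the second eight:
 * Q_IDX_82576(0) == 0, Q_IDX_82576(1) == 8, Q_IDX_82576(2) == 1,
 * Q_IDX_82576(3) == 9, and so on.  This matches the 82576 VF layout
 * described below, where VF n owns queue pair {n, n + 8}.
 */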
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
    int i = 0, j = 0;
    u32 rbase_offset = adapter->vfs_allocated_count;

    switch (adapter->hw.mac.type) {
    case e1000_82576:
        /* The queues are allocated for virtualization such that VF 0
         * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
         * In order to avoid collision we start at the first free queue
         * and continue consuming queues in the same sequence
         */
        if (adapter->vfs_allocated_count) {
            for (; i < adapter->rss_queues; i++)
                adapter->rx_ring[i]->reg_idx = rbase_offset +
                                               Q_IDX_82576(i);
        }
    case e1000_82575:
    case e1000_82580:
    case e1000_i350:
    default:
        for (; i < adapter->num_rx_queues; i++)
            adapter->rx_ring[i]->reg_idx = rbase_offset + i;
        for (; j < adapter->num_tx_queues; j++)
            adapter->tx_ring[j]->reg_idx = rbase_offset + j;
        break;
    }
}

static void igb_free_queues(struct igb_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_tx_queues; i++) {
        kfree(adapter->tx_ring[i]);
        adapter->tx_ring[i] = NULL;
    }
    for (i = 0; i < adapter->num_rx_queues; i++) {
        kfree(adapter->rx_ring[i]);
        adapter->rx_ring[i] = NULL;
    }
    adapter->num_rx_queues = 0;
    adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
    struct igb_ring *ring;
    int i;
    int orig_node = adapter->node;

    for (i = 0; i < adapter->num_tx_queues; i++) {
        if (orig_node == -1) {
            int cur_node = next_online_node(adapter->node);
            if (cur_node == MAX_NUMNODES)
                cur_node = first_online_node;
            adapter->node = cur_node;
        }
        ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
                            adapter->node);
        if (!ring)
            ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
        if (!ring)
            goto err;
        ring->count = adapter->tx_ring_count;
        ring->queue_index = i;
        ring->dev = &adapter->pdev->dev;
        ring->netdev = adapter->netdev;
        ring->numa_node = adapter->node;
        /* For 82575, context index must be unique per ring. */
        if (adapter->hw.mac.type == e1000_82575)
            set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
        adapter->tx_ring[i] = ring;
    }
    /* Restore the adapter's original node */
    adapter->node = orig_node;

    for (i = 0; i < adapter->num_rx_queues; i++) {
        if (orig_node == -1) {
            int cur_node = next_online_node(adapter->node);
            if (cur_node == MAX_NUMNODES)
                cur_node = first_online_node;
            adapter->node = cur_node;
        }
        ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
                            adapter->node);
        if (!ring)
            ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
        if (!ring)
            goto err;
        ring->count = adapter->rx_ring_count;
        ring->queue_index = i;
        ring->dev = &adapter->pdev->dev;
        ring->netdev = adapter->netdev;
        ring->numa_node = adapter->node;
        /* enable rx checksum */
        set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
        /* set flag indicating ring supports SCTP checksum offload */
        if (adapter->hw.mac.type >= e1000_82576)
            set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
        adapter->rx_ring[i] = ring;
    }
    /* Restore the adapter's original node */
    adapter->node = orig_node;

    igb_cache_ring_register(adapter);

    return 0;

err:
    /* Restore the adapter's original node */
    adapter->node = orig_node;
    igb_free_queues(adapter);

    return -ENOMEM;
}

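/*
 * NUMA note on igb_alloc_queues() above: when adapter->node is -1 (no
 * preferred node) the loops walk the online nodes round-robin so that
 * consecutive rings land on different nodes, falling back to a plain
 * kzalloc() if the node-local allocation fails.  adapter->node is restored
 * on every exit path so the temporary rotation never leaks out.
 */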
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
    u32 msixbm = 0;
    struct igb_adapter *adapter = q_vector->adapter;
    struct e1000_hw *hw = &adapter->hw;
    u32 ivar, index;
    int rx_queue = IGB_N0_QUEUE;
    int tx_queue = IGB_N0_QUEUE;

    if (q_vector->rx.ring)
        rx_queue = q_vector->rx.ring->reg_idx;
    if (q_vector->tx.ring)
        tx_queue = q_vector->tx.ring->reg_idx;

    switch (hw->mac.type) {
    case e1000_82575:
        /* The 82575 assigns vectors using a bitmask, which matches the
           bitmask for the EICR/EIMS/EIMC registers.  To assign one
           or more queues to a vector, we write the appropriate bits
           into the MSIXBM register for that vector. */
        if (rx_queue > IGB_N0_QUEUE)
            msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
        if (tx_queue > IGB_N0_QUEUE)
            msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
        if (!adapter->msix_entries && msix_vector == 0)
            msixbm |= E1000_EIMS_OTHER;
        array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
        q_vector->eims_value = msixbm;
        break;
    case e1000_82576:
        /* 82576 uses a table-based method for assigning vectors.
           Each queue has a single entry in the table to which we write
           a vector number along with a "valid" bit.  Sadly, the layout
           of the table is somewhat counterintuitive. */
        if (rx_queue > IGB_N0_QUEUE) {
            index = (rx_queue & 0x7);
            ivar = array_rd32(E1000_IVAR0, index);
            if (rx_queue < 8) {
                /* vector goes into low byte of register */
                ivar = ivar & 0xFFFFFF00;
                ivar |= msix_vector | E1000_IVAR_VALID;
            } else {
                /* vector goes into third byte of register */
                ivar = ivar & 0xFF00FFFF;
                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
            }
            array_wr32(E1000_IVAR0, index, ivar);
        }
        if (tx_queue > IGB_N0_QUEUE) {
            index = (tx_queue & 0x7);
            ivar = array_rd32(E1000_IVAR0, index);
            if (tx_queue < 8) {
                /* vector goes into second byte of register */
                ivar = ivar & 0xFFFF00FF;
                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
            } else {
                /* vector goes into high byte of register */
                ivar = ivar & 0x00FFFFFF;
                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
            }
            array_wr32(E1000_IVAR0, index, ivar);
        }
        q_vector->eims_value = 1 << msix_vector;
        break;
    case e1000_82580:
    case e1000_i350:
        /* 82580 uses the same table-based approach as 82576 but has fewer
           entries as a result we carry over for queues greater than 4. */
        if (rx_queue > IGB_N0_QUEUE) {
            index = (rx_queue >> 1);
            ivar = array_rd32(E1000_IVAR0, index);
            if (rx_queue & 0x1) {
                /* vector goes into third byte of register */
                ivar = ivar & 0xFF00FFFF;
                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
            } else {
                /* vector goes into low byte of register */
                ivar = ivar & 0xFFFFFF00;
                ivar |= msix_vector | E1000_IVAR_VALID;
            }
            array_wr32(E1000_IVAR0, index, ivar);
        }
        if (tx_queue > IGB_N0_QUEUE) {
            index = (tx_queue >> 1);
            ivar = array_rd32(E1000_IVAR0, index);
            if (tx_queue & 0x1) {
                /* vector goes into high byte of register */
                ivar = ivar & 0x00FFFFFF;
                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
            } else {
                /* vector goes into second byte of register */
                ivar = ivar & 0xFFFF00FF;
                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
            }
            array_wr32(E1000_IVAR0, index, ivar);
        }
        q_vector->eims_value = 1 << msix_vector;
        break;
    default:
        BUG();
        break;
    }

    /* add q_vector eims value to global eims_enable_mask */
    adapter->eims_enable_mask |= q_vector->eims_value;

    /* configure q_vector to set itr on first interrupt */
    q_vector->set_itr = 1;
}

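/*
 * For reference, the byte layout of one 32-bit IVAR0[i] entry on the 82576,
 * as implied by the masks above (each byte holds a vector number with
 * E1000_IVAR_VALID in its top bit, i = queue & 0x7):
 *
 *      31       24 23       16 15        8 7         0
 *     +-----------+-----------+-----------+-----------+
 *     | TX q i+8  | RX q i+8  |  TX q i   |  RX q i   |
 *     +-----------+-----------+-----------+-----------+
 *
 * The 82580/i350 variant instead packs queue pair (2i, 2i+1) into entry i,
 * hence the ">> 1" indexing and the odd/even byte selection.
 */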
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
    u32 tmp;
    int i, vector = 0;
    struct e1000_hw *hw = &adapter->hw;

    adapter->eims_enable_mask = 0;

    /* set vector for other causes, i.e. link changes */
    switch (hw->mac.type) {
    case e1000_82575:
        tmp = rd32(E1000_CTRL_EXT);
        /* enable MSI-X PBA support */
        tmp |= E1000_CTRL_EXT_PBA_CLR;

        /* Auto-Mask interrupts upon ICR read. */
        tmp |= E1000_CTRL_EXT_EIAME;
        tmp |= E1000_CTRL_EXT_IRCA;

        wr32(E1000_CTRL_EXT, tmp);

        /* enable msix_other interrupt */
        array_wr32(E1000_MSIXBM(0), vector++,
                   E1000_EIMS_OTHER);
        adapter->eims_other = E1000_EIMS_OTHER;

        break;

    case e1000_82576:
    case e1000_82580:
    case e1000_i350:
        /* Turn on MSI-X capability first, or our settings
         * won't stick.  And it will take days to debug. */
        wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                         E1000_GPIE_PBA | E1000_GPIE_EIAME |
                         E1000_GPIE_NSICR);

        /* enable msix_other interrupt */
        adapter->eims_other = 1 << vector;
        tmp = (vector++ | E1000_IVAR_VALID) << 8;

        wr32(E1000_IVAR_MISC, tmp);
        break;
    default:
        /* do nothing, since nothing else supports MSI-X */
        break;
    } /* switch (hw->mac.type) */

    adapter->eims_enable_mask |= adapter->eims_other;

    for (i = 0; i < adapter->num_q_vectors; i++)
        igb_assign_vector(adapter->q_vector[i], vector++);

    wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct e1000_hw *hw = &adapter->hw;
    int i, err = 0, vector = 0;

    err = request_irq(adapter->msix_entries[vector].vector,
                      igb_msix_other, 0, netdev->name, adapter);
    if (err)
        goto out;
    vector++;

    for (i = 0; i < adapter->num_q_vectors; i++) {
        struct igb_q_vector *q_vector = adapter->q_vector[i];

        q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

        if (q_vector->rx.ring && q_vector->tx.ring)
            sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                    q_vector->rx.ring->queue_index);
        else if (q_vector->tx.ring)
            sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                    q_vector->tx.ring->queue_index);
        else if (q_vector->rx.ring)
            sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                    q_vector->rx.ring->queue_index);
        else
            sprintf(q_vector->name, "%s-unused", netdev->name);

        err = request_irq(adapter->msix_entries[vector].vector,
                          igb_msix_ring, 0, q_vector->name,
                          q_vector);
        if (err)
            goto out;
        vector++;
    }

    igb_configure_msix(adapter);
    return 0;
out:
    return err;
}

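/*
 * With the naming scheme above, a two-queue adapter in MSI-X mode ends up
 * with /proc/interrupts entries along these lines (illustrative output, not
 * copied from a real system):
 *
 *     eth0          <- vector 0, igb_msix_other (link/mailbox events)
 *     eth0-TxRx-0   <- vector 1, igb_msix_ring for queue pair 0
 *     eth0-TxRx-1   <- vector 2, igb_msix_ring for queue pair 1
 */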
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
    if (adapter->msix_entries) {
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
    } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
        pci_disable_msi(adapter->pdev);
    }
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
    int v_idx;

    for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
        adapter->q_vector[v_idx] = NULL;
        if (!q_vector)
            continue;
        netif_napi_del(&q_vector->napi);
        kfree(q_vector);
    }
    adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
    igb_free_queues(adapter);
    igb_free_q_vectors(adapter);
    igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
    int err;
    int numvecs, i;

    /* Number of supported queues. */
    adapter->num_rx_queues = adapter->rss_queues;
    if (adapter->vfs_allocated_count)
        adapter->num_tx_queues = 1;
    else
        adapter->num_tx_queues = adapter->rss_queues;

    /* start with one vector for every rx queue */
    numvecs = adapter->num_rx_queues;

    /* if tx handler is separate add 1 for every tx queue */
    if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
        numvecs += adapter->num_tx_queues;

    /* store the number of vectors reserved for queues */
    adapter->num_q_vectors = numvecs;

    /* add 1 vector for link status interrupts */
    numvecs++;
    adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                    GFP_KERNEL);
    if (!adapter->msix_entries)
        goto msi_only;

    for (i = 0; i < numvecs; i++)
        adapter->msix_entries[i].entry = i;

    err = pci_enable_msix(adapter->pdev,
                          adapter->msix_entries,
                          numvecs);
    if (err == 0)
        goto out;

    igb_reset_interrupt_capability(adapter);

    /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
    /* disable SR-IOV for non MSI-X configurations */
    if (adapter->vf_data) {
        struct e1000_hw *hw = &adapter->hw;
        /* disable iov and allow time for transactions to clear */
        pci_disable_sriov(adapter->pdev);
        msleep(500);

        kfree(adapter->vf_data);
        adapter->vf_data = NULL;
        wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
        wrfl();
        msleep(100);
        dev_info(&adapter->pdev->dev, "IOV Disabled\n");
    }
#endif
    adapter->vfs_allocated_count = 0;
    adapter->rss_queues = 1;
    adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
    adapter->num_rx_queues = 1;
    adapter->num_tx_queues = 1;
    adapter->num_q_vectors = 1;
    if (!pci_enable_msi(adapter->pdev))
        adapter->flags |= IGB_FLAG_HAS_MSI;
out:
    /* Notify the stack of the (possibly) reduced queue counts. */
    netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
    return netif_set_real_num_rx_queues(adapter->netdev,
                                        adapter->num_rx_queues);
}

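/*
 * Worked example of the vector math above: with rss_queues = 4, no VFs and
 * IGB_FLAG_QUEUE_PAIRS clear, we get 4 rx + 4 tx = 8 queue vectors plus one
 * link-status vector, so pci_enable_msix() is asked for 9 entries.  With
 * queue pairing enabled the same setup needs only 4 + 1 = 5.
 */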
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
    struct igb_q_vector *q_vector;
    struct e1000_hw *hw = &adapter->hw;
    int v_idx;
    int orig_node = adapter->node;

    for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
        if ((adapter->num_q_vectors == (adapter->num_rx_queues +
                                        adapter->num_tx_queues)) &&
            (adapter->num_rx_queues == v_idx))
            adapter->node = orig_node;
        if (orig_node == -1) {
            int cur_node = next_online_node(adapter->node);
            if (cur_node == MAX_NUMNODES)
                cur_node = first_online_node;
            adapter->node = cur_node;
        }
        q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
                                adapter->node);
        if (!q_vector)
            q_vector = kzalloc(sizeof(struct igb_q_vector),
                               GFP_KERNEL);
        if (!q_vector)
            goto err_out;
        q_vector->adapter = adapter;
        q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
        q_vector->itr_val = IGB_START_ITR;
        netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
        adapter->q_vector[v_idx] = q_vector;
    }
    /* Restore the adapter's original node */
    adapter->node = orig_node;

    return 0;

err_out:
    /* Restore the adapter's original node */
    adapter->node = orig_node;
    igb_free_q_vectors(adapter);
    return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
    struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

    q_vector->rx.ring = adapter->rx_ring[ring_idx];
    q_vector->rx.ring->q_vector = q_vector;
    q_vector->rx.count++;
    q_vector->itr_val = adapter->rx_itr_setting;
    if (q_vector->itr_val && q_vector->itr_val <= 3)
        q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
    struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

    q_vector->tx.ring = adapter->tx_ring[ring_idx];
    q_vector->tx.ring->q_vector = q_vector;
    q_vector->tx.count++;
    q_vector->itr_val = adapter->tx_itr_setting;
    q_vector->tx.work_limit = adapter->tx_work_limit;
    if (q_vector->itr_val && q_vector->itr_val <= 3)
        q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
    int i;
    int v_idx = 0;

    if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
        (adapter->num_q_vectors < adapter->num_tx_queues))
        return -ENOMEM;

    if (adapter->num_q_vectors >=
        (adapter->num_rx_queues + adapter->num_tx_queues)) {
        for (i = 0; i < adapter->num_rx_queues; i++)
            igb_map_rx_ring_to_vector(adapter, i, v_idx++);
        for (i = 0; i < adapter->num_tx_queues; i++)
            igb_map_tx_ring_to_vector(adapter, i, v_idx++);
    } else {
        for (i = 0; i < adapter->num_rx_queues; i++) {
            if (i < adapter->num_tx_queues)
                igb_map_tx_ring_to_vector(adapter, i, v_idx);
            igb_map_rx_ring_to_vector(adapter, i, v_idx++);
        }
        for (; i < adapter->num_tx_queues; i++)
            igb_map_tx_ring_to_vector(adapter, i, v_idx++);
    }
    return 0;
}

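/*
 * Mapping example: with 4 rx queues, 4 tx queues and 8 q_vectors, each ring
 * gets its own vector.  With only 4 q_vectors (queue-pairs mode) the else
 * branch above pairs tx queue i and rx queue i onto vector i, so vector 0
 * serves TxRx-0, vector 1 serves TxRx-1, and so on.
 */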
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    int err;

    err = igb_set_interrupt_capability(adapter);
    if (err)
        return err;

    err = igb_alloc_q_vectors(adapter);
    if (err) {
        dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
        goto err_alloc_q_vectors;
    }

    err = igb_alloc_queues(adapter);
    if (err) {
        dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
        goto err_alloc_queues;
    }

    err = igb_map_ring_to_vector(adapter);
    if (err) {
        dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
        goto err_map_queues;
    }

    return 0;
err_map_queues:
    igb_free_queues(adapter);
err_alloc_queues:
    igb_free_q_vectors(adapter);
err_alloc_q_vectors:
    igb_reset_interrupt_capability(adapter);
    return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    int err = 0;

    if (adapter->msix_entries) {
        err = igb_request_msix(adapter);
        if (!err)
            goto request_done;
        /* fall back to MSI */
        igb_clear_interrupt_scheme(adapter);
        if (!pci_enable_msi(adapter->pdev))
            adapter->flags |= IGB_FLAG_HAS_MSI;
        igb_free_all_tx_resources(adapter);
        igb_free_all_rx_resources(adapter);
        adapter->num_tx_queues = 1;
        adapter->num_rx_queues = 1;
        adapter->num_q_vectors = 1;
        err = igb_alloc_q_vectors(adapter);
        if (err) {
            dev_err(&pdev->dev,
                    "Unable to allocate memory for vectors\n");
            goto request_done;
        }
        err = igb_alloc_queues(adapter);
        if (err) {
            dev_err(&pdev->dev,
                    "Unable to allocate memory for queues\n");
            igb_free_q_vectors(adapter);
            goto request_done;
        }
        igb_setup_all_tx_resources(adapter);
        igb_setup_all_rx_resources(adapter);
    } else {
        igb_assign_vector(adapter->q_vector[0], 0);
    }

    if (adapter->flags & IGB_FLAG_HAS_MSI) {
        err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
                          netdev->name, adapter);
        if (!err)
            goto request_done;

        /* fall back to legacy interrupts */
        igb_reset_interrupt_capability(adapter);
        adapter->flags &= ~IGB_FLAG_HAS_MSI;
    }

    err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
                      netdev->name, adapter);

    if (err)
        dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                err);

request_done:
    return err;
}

1323static void igb_free_irq(struct igb_adapter *adapter)
1324{
Auke Kok9d5c8242008-01-24 02:22:38 -08001325 if (adapter->msix_entries) {
1326 int vector = 0, i;
1327
Alexander Duyck047e0032009-10-27 15:49:27 +00001328 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001329
Alexander Duyck047e0032009-10-27 15:49:27 +00001330 for (i = 0; i < adapter->num_q_vectors; i++) {
1331 struct igb_q_vector *q_vector = adapter->q_vector[i];
1332 free_irq(adapter->msix_entries[vector++].vector,
1333 q_vector);
1334 }
1335 } else {
1336 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001337 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001338}
1339
1340/**
1341 * igb_irq_disable - Mask off interrupt generation on the NIC
1342 * @adapter: board private structure
1343 **/
1344static void igb_irq_disable(struct igb_adapter *adapter)
1345{
1346 struct e1000_hw *hw = &adapter->hw;
1347
	/*
	 * We need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers, so clearing the bits can cause
	 * issues for the VF drivers; only clear what we set.
	 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001353 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001354 u32 regval = rd32(E1000_EIAM);
1355 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1356 wr32(E1000_EIMC, adapter->eims_enable_mask);
1357 regval = rd32(E1000_EIAC);
1358 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001359 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001360
1361 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001362 wr32(E1000_IMC, ~0);
1363 wrfl();
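	/* make sure any in-flight interrupt handlers have completed */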
Emil Tantilov81a61852010-08-02 14:40:52 +00001364 if (adapter->msix_entries) {
1365 int i;
1366 for (i = 0; i < adapter->num_q_vectors; i++)
1367 synchronize_irq(adapter->msix_entries[i].vector);
1368 } else {
1369 synchronize_irq(adapter->pdev->irq);
1370 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001371}
1372
1373/**
1374 * igb_irq_enable - Enable default interrupt generation settings
1375 * @adapter: board private structure
1376 **/
1377static void igb_irq_enable(struct igb_adapter *adapter)
1378{
1379 struct e1000_hw *hw = &adapter->hw;
1380
1381 if (adapter->msix_entries) {
Alexander Duyck25568a52009-10-27 23:49:59 +00001382 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001383 u32 regval = rd32(E1000_EIAC);
1384 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1385 regval = rd32(E1000_EIAM);
1386 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001387 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001388 if (adapter->vfs_allocated_count) {
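			/* unmask mailbox interrupts from all VFs */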
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001389 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001390 ims |= E1000_IMS_VMMB;
1391 }
Alexander Duyck55cac242009-11-19 12:42:21 +00001392 if (adapter->hw.mac.type == e1000_82580)
1393 ims |= E1000_IMS_DRSTA;
1394
Alexander Duyck25568a52009-10-27 23:49:59 +00001395 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001396 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001397 wr32(E1000_IMS, IMS_ENABLE_MASK |
1398 E1000_IMS_DRSTA);
1399 wr32(E1000_IAM, IMS_ENABLE_MASK |
1400 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001401 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001402}
1403
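/**
 * igb_update_mng_vlan - update the VLAN filter for the management VLAN
 * @adapter: board private structure
 *
 * Adds the VLAN id from the manageability DHCP cookie to the filter table
 * and removes the previous id if it is no longer in use.
 **/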
1404static void igb_update_mng_vlan(struct igb_adapter *adapter)
1405{
Alexander Duyck51466232009-10-27 23:47:35 +00001406 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001407 u16 vid = adapter->hw.mng_cookie.vlan_id;
1408 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001409
Alexander Duyck51466232009-10-27 23:47:35 +00001410 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1411 /* add VID to filter table */
1412 igb_vfta_set(hw, vid, true);
1413 adapter->mng_vlan_id = vid;
1414 } else {
1415 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1416 }
1417
1418 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1419 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001420 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001421 /* remove VID from filter table */
1422 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001423 }
1424}
1425
1426/**
1427 * igb_release_hw_control - release control of the h/w to f/w
1428 * @adapter: address of board private structure
1429 *
1430 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1431 * For ASF and Pass Through versions of f/w this means that the
1432 * driver is no longer loaded.
1433 *
1434 **/
1435static void igb_release_hw_control(struct igb_adapter *adapter)
1436{
1437 struct e1000_hw *hw = &adapter->hw;
1438 u32 ctrl_ext;
1439
1440 /* Let firmware take over control of h/w */
1441 ctrl_ext = rd32(E1000_CTRL_EXT);
1442 wr32(E1000_CTRL_EXT,
1443 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1444}
1445
Auke Kok9d5c8242008-01-24 02:22:38 -08001446/**
1447 * igb_get_hw_control - get control of the h/w from f/w
1448 * @adapter: address of board private structure
1449 *
1450 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1451 * For ASF and Pass Through versions of f/w this means that
1452 * the driver is loaded.
1453 *
1454 **/
1455static void igb_get_hw_control(struct igb_adapter *adapter)
1456{
1457 struct e1000_hw *hw = &adapter->hw;
1458 u32 ctrl_ext;
1459
1460 /* Let firmware know the driver has taken over */
1461 ctrl_ext = rd32(E1000_CTRL_EXT);
1462 wr32(E1000_CTRL_EXT,
1463 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1464}
1465
Auke Kok9d5c8242008-01-24 02:22:38 -08001466/**
1467 * igb_configure - configure the hardware for RX and TX
1468 * @adapter: private board structure
1469 **/
1470static void igb_configure(struct igb_adapter *adapter)
1471{
1472 struct net_device *netdev = adapter->netdev;
1473 int i;
1474
1475 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001476 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001477
1478 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001479
Alexander Duyck85b430b2009-10-27 15:50:29 +00001480 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001481 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001482 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001483
1484 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001485 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001486
1487 igb_rx_fifo_flush_82575(&adapter->hw);
1488
Alexander Duyckc493ea42009-03-20 00:16:50 +00001489 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001490 * at least 1 descriptor unused to make sure
1491 * next_to_use != next_to_clean */
1492 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001493 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001494 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001495 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001496}
1497
Nick Nunley88a268c2010-02-17 01:01:59 +00001498/**
1499 * igb_power_up_link - Power up the phy/serdes link
1500 * @adapter: address of board private structure
1501 **/
1502void igb_power_up_link(struct igb_adapter *adapter)
1503{
1504 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1505 igb_power_up_phy_copper(&adapter->hw);
1506 else
1507 igb_power_up_serdes_link_82575(&adapter->hw);
1508}
1509
1510/**
1511 * igb_power_down_link - Power down the phy/serdes link
1512 * @adapter: address of board private structure
1513 */
1514static void igb_power_down_link(struct igb_adapter *adapter)
1515{
1516 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1517 igb_power_down_phy_copper_82575(&adapter->hw);
1518 else
1519 igb_shutdown_serdes_link_82575(&adapter->hw);
1520}
Auke Kok9d5c8242008-01-24 02:22:38 -08001521
1522/**
1523 * igb_up - Open the interface and prepare it to handle traffic
1524 * @adapter: board private structure
1525 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001526int igb_up(struct igb_adapter *adapter)
1527{
1528 struct e1000_hw *hw = &adapter->hw;
1529 int i;
1530
1531 /* hardware has been reset, we need to reload some things */
1532 igb_configure(adapter);
1533
1534 clear_bit(__IGB_DOWN, &adapter->state);
1535
Alexander Duyck047e0032009-10-27 15:49:27 +00001536 for (i = 0; i < adapter->num_q_vectors; i++) {
1537 struct igb_q_vector *q_vector = adapter->q_vector[i];
1538 napi_enable(&q_vector->napi);
1539 }
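	/* restore the interrupt routing that was cleared by the reset */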
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001540 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001541 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001542 else
1543 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001544
1545 /* Clear any pending interrupts. */
1546 rd32(E1000_ICR);
1547 igb_irq_enable(adapter);
1548
Alexander Duyckd4960302009-10-27 15:53:45 +00001549 /* notify VFs that reset has been completed */
1550 if (adapter->vfs_allocated_count) {
1551 u32 reg_data = rd32(E1000_CTRL_EXT);
1552 reg_data |= E1000_CTRL_EXT_PFRSTD;
1553 wr32(E1000_CTRL_EXT, reg_data);
1554 }
1555
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001556 netif_tx_start_all_queues(adapter->netdev);
1557
Alexander Duyck25568a52009-10-27 23:49:59 +00001558 /* start the watchdog. */
1559 hw->mac.get_link_status = 1;
1560 schedule_work(&adapter->watchdog_task);
1561
Auke Kok9d5c8242008-01-24 02:22:38 -08001562 return 0;
1563}
1564
1565void igb_down(struct igb_adapter *adapter)
1566{
Auke Kok9d5c8242008-01-24 02:22:38 -08001567 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001568 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001569 u32 tctl, rctl;
1570 int i;
1571
1572 /* signal that we're down so the interrupt handler does not
1573 * reschedule our watchdog timer */
1574 set_bit(__IGB_DOWN, &adapter->state);
1575
1576 /* disable receives in the hardware */
1577 rctl = rd32(E1000_RCTL);
1578 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1579 /* flush and sleep below */
1580
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001581 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001582
1583 /* disable transmits in the hardware */
1584 tctl = rd32(E1000_TCTL);
1585 tctl &= ~E1000_TCTL_EN;
1586 wr32(E1000_TCTL, tctl);
1587 /* flush both disables and wait for them to finish */
1588 wrfl();
1589 msleep(10);
1590
Alexander Duyck047e0032009-10-27 15:49:27 +00001591 for (i = 0; i < adapter->num_q_vectors; i++) {
1592 struct igb_q_vector *q_vector = adapter->q_vector[i];
1593 napi_disable(&q_vector->napi);
1594 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001595
Auke Kok9d5c8242008-01-24 02:22:38 -08001596 igb_irq_disable(adapter);
1597
1598 del_timer_sync(&adapter->watchdog_timer);
1599 del_timer_sync(&adapter->phy_info_timer);
1600
Auke Kok9d5c8242008-01-24 02:22:38 -08001601 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001602
	/* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001604 spin_lock(&adapter->stats64_lock);
1605 igb_update_stats(adapter, &adapter->stats64);
1606 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001607
Auke Kok9d5c8242008-01-24 02:22:38 -08001608 adapter->link_speed = 0;
1609 adapter->link_duplex = 0;
1610
Jeff Kirsher30236822008-06-24 17:01:15 -07001611 if (!pci_channel_offline(adapter->pdev))
1612 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001613 igb_clean_all_tx_rings(adapter);
1614 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001615#ifdef CONFIG_IGB_DCA
1616
1617 /* since we reset the hardware DCA settings were cleared */
1618 igb_setup_dca(adapter);
1619#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001620}
1621
1622void igb_reinit_locked(struct igb_adapter *adapter)
1623{
1624 WARN_ON(in_interrupt());
1625 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1626 msleep(1);
1627 igb_down(adapter);
1628 igb_up(adapter);
1629 clear_bit(__IGB_RESETTING, &adapter->state);
1630}
1631
1632void igb_reset(struct igb_adapter *adapter)
1633{
Alexander Duyck090b1792009-10-27 23:51:55 +00001634 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001635 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001636 struct e1000_mac_info *mac = &hw->mac;
1637 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001638 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1639 u16 hwm;
1640
	/* Repartition PBA for MTUs greater than 9k.
	 * CTRL.RST is required for the change to take effect.
	 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001644 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001645 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001646 case e1000_82580:
1647 pba = rd32(E1000_RXPBS);
1648 pba = igb_rxpbs_adjust_82580(pba);
1649 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001650 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001651 pba = rd32(E1000_RXPBS);
1652 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001653 break;
1654 case e1000_82575:
1655 default:
1656 pba = E1000_PBA_34K;
1657 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001658 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001659
Alexander Duyck2d064c02008-07-08 15:10:12 -07001660 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1661 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001662 /* adjust PBA for jumbo frames */
1663 wr32(E1000_PBA, pba);
1664
1665 /* To maintain wire speed transmits, the Tx FIFO should be
1666 * large enough to accommodate two full transmit packets,
1667 * rounded up to the next 1KB and expressed in KB. Likewise,
1668 * the Rx FIFO should be large enough to accommodate at least
1669 * one full receive packet and is similarly rounded up and
1670 * expressed in KB. */
1671 pba = rd32(E1000_PBA);
1672 /* upper 16 bits has Tx packet buffer allocation size in KB */
1673 tx_space = pba >> 16;
1674 /* lower 16 bits has Rx packet buffer allocation size in KB */
1675 pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about the
		 * packet, but doesn't include the Ethernet FCS because the
		 * hardware appends it */
1678 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001679 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001680 ETH_FCS_LEN) * 2;
1681 min_tx_space = ALIGN(min_tx_space, 1024);
1682 min_tx_space >>= 10;
1683 /* software strips receive CRC, so leave room for it */
1684 min_rx_space = adapter->max_frame_size;
1685 min_rx_space = ALIGN(min_rx_space, 1024);
1686 min_rx_space >>= 10;
1687
1688 /* If current Tx allocation is less than the min Tx FIFO size,
1689 * and the min Tx FIFO size is less than the current Rx FIFO
1690 * allocation, take space away from current Rx allocation */
1691 if (tx_space < min_tx_space &&
1692 ((min_tx_space - tx_space) < pba)) {
1693 pba = pba - (min_tx_space - tx_space);
1694
1695 /* if short on rx space, rx wins and must trump tx
1696 * adjustment */
1697 if (pba < min_rx_space)
1698 pba = min_rx_space;
1699 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001700 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001701 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001702
1703 /* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus two full frames */
1709 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001710 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001711
Alexander Duyckd405ea32009-12-23 13:21:27 +00001712 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1713 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001714 fc->pause_time = 0xFFFF;
1715 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001716 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001717
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001718 /* disable receive for all VFs and wait one second */
1719 if (adapter->vfs_allocated_count) {
1720 int i;
1721 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001722 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001723
1724 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001725 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001726
1727 /* disable transmits and receives */
1728 wr32(E1000_VFRE, 0);
1729 wr32(E1000_VFTE, 0);
1730 }
1731
Auke Kok9d5c8242008-01-24 02:22:38 -08001732 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001733 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001734 wr32(E1000_WUC, 0);
1735
Alexander Duyck330a6d62009-10-27 23:51:35 +00001736 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001737 dev_err(&pdev->dev, "Hardware Error\n");
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08001738 if (hw->mac.type > e1000_82580) {
1739 if (adapter->flags & IGB_FLAG_DMAC) {
1740 u32 reg;
Auke Kok9d5c8242008-01-24 02:22:38 -08001741
			/*
			 * The DMA Coalescing high water mark needs to be
			 * higher than the Rx threshold. The Rx threshold is
			 * currently pba - 6, so use a high water mark of
			 * pba - 4.
			 */
1747 hwm = (pba - 4) << 10;
1748
1749 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1750 & E1000_DMACR_DMACTHR_MASK);
1751
			/* transition to L0s or L1 if available */
1753 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1754
			/* watchdog timer = +-1000 usec in 32 usec intervals */
1756 reg |= (1000 >> 5);
1757 wr32(E1000_DMACR, reg);
1758
			/* no lower threshold to disable coalescing
			 * (smart fifo) - UTRESH=0 */
1761 wr32(E1000_DMCRTRH, 0);
1762
			/* write the high water mark (pba - 4 KB) to FCRTC */
1764 wr32(E1000_FCRTC, hwm);
1765
			/*
			 * This sets the time to wait before requesting
			 * transition to a low power state to the number of
			 * usecs needed to receive a 512-byte frame at
			 * gigabit line rate.
			 */
1771 reg = rd32(E1000_DMCTLX);
1772 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1773
1774 /* Delay 255 usec before entering Lx state. */
1775 reg |= 0xFF;
1776 wr32(E1000_DMCTLX, reg);
1777
1778 /* free space in Tx packet buffer to wake from DMAC */
1779 wr32(E1000_DMCTXTH,
1780 (IGB_MIN_TXPBSIZE -
1781 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1782 >> 6);
1783
1784 /* make low power state decision controlled by DMAC */
1785 reg = rd32(E1000_PCIEMISC);
1786 reg |= E1000_PCIEMISC_LX_DECISION;
1787 wr32(E1000_PCIEMISC, reg);
1788 } /* end if IGB_FLAG_DMAC set */
1789 }
Alexander Duyck55cac242009-11-19 12:42:21 +00001790 if (hw->mac.type == e1000_82580) {
1791 u32 reg = rd32(E1000_PCIEMISC);
1792 wr32(E1000_PCIEMISC,
1793 reg & ~E1000_PCIEMISC_LX_DECISION);
1794 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001795 if (!netif_running(adapter->netdev))
1796 igb_power_down_link(adapter);
1797
Auke Kok9d5c8242008-01-24 02:22:38 -08001798 igb_update_mng_vlan(adapter);
1799
1800 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1801 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1802
Alexander Duyck330a6d62009-10-27 23:51:35 +00001803 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001804}
1805
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001806static u32 igb_fix_features(struct net_device *netdev, u32 features)
1807{
1808 /*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable, make sure the tx flag is always in the same
	 * state as the rx flag.
1811 */
1812 if (features & NETIF_F_HW_VLAN_RX)
1813 features |= NETIF_F_HW_VLAN_TX;
1814 else
1815 features &= ~NETIF_F_HW_VLAN_TX;
1816
1817 return features;
1818}
1819
Michał Mirosławac52caa2011-06-08 08:38:01 +00001820static int igb_set_features(struct net_device *netdev, u32 features)
1821{
1822 struct igb_adapter *adapter = netdev_priv(netdev);
1823 int i;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001824 u32 changed = netdev->features ^ features;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001825
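	/* propagate the new Rx checksum setting to every Rx ring */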
1826 for (i = 0; i < adapter->num_rx_queues; i++) {
1827 if (features & NETIF_F_RXCSUM)
Alexander Duyck866cff02011-08-26 07:45:36 +00001828 set_bit(IGB_RING_FLAG_RX_CSUM,
1829 &adapter->rx_ring[i]->flags);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001830 else
Alexander Duyck866cff02011-08-26 07:45:36 +00001831 clear_bit(IGB_RING_FLAG_RX_CSUM,
1832 &adapter->rx_ring[i]->flags);
Michał Mirosławac52caa2011-06-08 08:38:01 +00001833 }
1834
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001835 if (changed & NETIF_F_HW_VLAN_RX)
1836 igb_vlan_mode(netdev, features);
1837
Michał Mirosławac52caa2011-06-08 08:38:01 +00001838 return 0;
1839}
1840
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001841static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001842 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001843 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001844 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001845 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001846 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001847 .ndo_set_mac_address = igb_set_mac,
1848 .ndo_change_mtu = igb_change_mtu,
1849 .ndo_do_ioctl = igb_ioctl,
1850 .ndo_tx_timeout = igb_tx_timeout,
1851 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001852 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1853 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001854 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1855 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1856 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1857 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001858#ifdef CONFIG_NET_POLL_CONTROLLER
1859 .ndo_poll_controller = igb_netpoll,
1860#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001861 .ndo_fix_features = igb_fix_features,
1862 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001863};
1864
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001865/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001866 * igb_probe - Device Initialization Routine
1867 * @pdev: PCI device information struct
1868 * @ent: entry in igb_pci_tbl
1869 *
1870 * Returns 0 on success, negative on failure
1871 *
1872 * igb_probe initializes an adapter identified by a pci_dev structure.
1873 * The OS initialization, configuring of the adapter private structure,
1874 * and a hardware reset occur.
1875 **/
1876static int __devinit igb_probe(struct pci_dev *pdev,
1877 const struct pci_device_id *ent)
1878{
1879 struct net_device *netdev;
1880 struct igb_adapter *adapter;
1881 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001882 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001883 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001884 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001885 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1886 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001887 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001888 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001889 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001890
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001891 /* Catch broken hardware that put the wrong VF device ID in
1892 * the PCIe SR-IOV capability.
1893 */
1894 if (pdev->is_virtfn) {
1895 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1896 pci_name(pdev), pdev->vendor, pdev->device);
1897 return -EINVAL;
1898 }
1899
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001900 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001901 if (err)
1902 return err;
1903
1904 pci_using_dac = 0;
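	/* try 64-bit DMA first, then fall back to 32-bit on failure */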
Alexander Duyck59d71982010-04-27 13:09:25 +00001905 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001906 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001907 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001908 if (!err)
1909 pci_using_dac = 1;
1910 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001911 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001912 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001913 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001914 if (err) {
1915 dev_err(&pdev->dev, "No usable DMA "
1916 "configuration, aborting\n");
1917 goto err_dma;
1918 }
1919 }
1920 }
1921
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001922 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1923 IORESOURCE_MEM),
1924 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001925 if (err)
1926 goto err_pci_reg;
1927
Frans Pop19d5afd2009-10-02 10:04:12 -07001928 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001929
Auke Kok9d5c8242008-01-24 02:22:38 -08001930 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001931 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001932
1933 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001934 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001935 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001936 if (!netdev)
1937 goto err_alloc_etherdev;
1938
1939 SET_NETDEV_DEV(netdev, &pdev->dev);
1940
1941 pci_set_drvdata(pdev, netdev);
1942 adapter = netdev_priv(netdev);
1943 adapter->netdev = netdev;
1944 adapter->pdev = pdev;
1945 hw = &adapter->hw;
1946 hw->back = adapter;
1947 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1948
1949 mmio_start = pci_resource_start(pdev, 0);
1950 mmio_len = pci_resource_len(pdev, 0);
1951
1952 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001953 hw->hw_addr = ioremap(mmio_start, mmio_len);
1954 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001955 goto err_ioremap;
1956
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001957 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001958 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001959 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001960
1961 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1962
1963 netdev->mem_start = mmio_start;
1964 netdev->mem_end = mmio_start + mmio_len;
1965
Auke Kok9d5c8242008-01-24 02:22:38 -08001966 /* PCI config space info */
1967 hw->vendor_id = pdev->vendor;
1968 hw->device_id = pdev->device;
1969 hw->revision_id = pdev->revision;
1970 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1971 hw->subsystem_device_id = pdev->subsystem_device;
1972
Auke Kok9d5c8242008-01-24 02:22:38 -08001973 /* Copy the default MAC, PHY and NVM function pointers */
1974 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1975 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1976 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1977 /* Initialize skew-specific constants */
1978 err = ei->get_invariants(hw);
1979 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001980 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001981
Alexander Duyck450c87c2009-02-06 23:22:11 +00001982 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001983 err = igb_sw_init(adapter);
1984 if (err)
1985 goto err_sw_init;
1986
1987 igb_get_bus_info_pcie(hw);
1988
1989 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001990
1991 /* Copper options */
1992 if (hw->phy.media_type == e1000_media_type_copper) {
1993 hw->phy.mdix = AUTO_ALL_MODES;
1994 hw->phy.disable_polarity_correction = false;
1995 hw->phy.ms_type = e1000_ms_hw_default;
1996 }
1997
1998 if (igb_check_reset_block(hw))
1999 dev_info(&pdev->dev,
2000 "PHY reset is blocked due to SOL/IDER session.\n");
2001
Michał Mirosławac52caa2011-06-08 08:38:01 +00002002 netdev->hw_features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00002003 NETIF_F_IP_CSUM |
Michał Mirosławac52caa2011-06-08 08:38:01 +00002004 NETIF_F_IPV6_CSUM |
2005 NETIF_F_TSO |
2006 NETIF_F_TSO6 |
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002007 NETIF_F_RXCSUM |
2008 NETIF_F_HW_VLAN_RX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002009
2010 netdev->features = netdev->hw_features |
Auke Kok9d5c8242008-01-24 02:22:38 -08002011 NETIF_F_HW_VLAN_TX |
Auke Kok9d5c8242008-01-24 02:22:38 -08002012 NETIF_F_HW_VLAN_FILTER;
2013
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002014 netdev->vlan_features |= NETIF_F_TSO;
2015 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00002016 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00002017 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07002018 netdev->vlan_features |= NETIF_F_SG;
2019
Yi Zou7b872a52010-09-22 17:57:58 +00002020 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002021 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00002022 netdev->vlan_features |= NETIF_F_HIGHDMA;
2023 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002024
Michał Mirosławac52caa2011-06-08 08:38:01 +00002025 if (hw->mac.type >= e1000_82576) {
2026 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002027 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00002028 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00002029
Jiri Pirko01789342011-08-16 06:29:00 +00002030 netdev->priv_flags |= IFF_UNICAST_FLT;
2031
Alexander Duyck330a6d62009-10-27 23:51:35 +00002032 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002033
2034 /* before reading the NVM, reset the controller to put the device in a
2035 * known good starting state */
2036 hw->mac.ops.reset_hw(hw);
2037
2038 /* make sure the NVM is good */
Carolyn Wyborny4322e562011-03-11 20:43:18 -08002039 if (hw->nvm.ops.validate(hw) < 0) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002040 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2041 err = -EIO;
2042 goto err_eeprom;
2043 }
2044
2045 /* copy the MAC address out of the NVM */
2046 if (hw->mac.ops.read_mac_addr(hw))
2047 dev_err(&pdev->dev, "NVM Read Error\n");
2048
2049 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2050 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2051
2052 if (!is_valid_ether_addr(netdev->perm_addr)) {
2053 dev_err(&pdev->dev, "Invalid MAC Address\n");
2054 err = -EIO;
2055 goto err_eeprom;
2056 }
2057
Joe Perchesc061b182010-08-23 18:20:03 +00002058 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002059 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002060 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002061 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002062
2063 INIT_WORK(&adapter->reset_task, igb_reset_task);
2064 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2065
Alexander Duyck450c87c2009-02-06 23:22:11 +00002066 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002067 adapter->fc_autoneg = true;
2068 hw->mac.autoneg = true;
2069 hw->phy.autoneg_advertised = 0x2f;
2070
Alexander Duyck0cce1192009-07-23 18:10:24 +00002071 hw->fc.requested_mode = e1000_fc_default;
2072 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002073
Auke Kok9d5c8242008-01-24 02:22:38 -08002074 igb_validate_mdi_setting(hw);
2075
Auke Kok9d5c8242008-01-24 02:22:38 -08002076 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
2077 * enable the ACPI Magic Packet filter
2078 */
2079
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002080 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002081 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002082 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002083 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2084 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2085 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002086 else if (hw->bus.func == 1)
2087 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002088
2089 if (eeprom_data & eeprom_apme_mask)
2090 adapter->eeprom_wol |= E1000_WUFC_MAG;
2091
2092 /* now that we have the eeprom settings, apply the special cases where
2093 * the eeprom may be wrong or the board simply won't support wake on
2094 * lan on a particular port */
2095 switch (pdev->device) {
2096 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2097 adapter->eeprom_wol = 0;
2098 break;
2099 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002100 case E1000_DEV_ID_82576_FIBER:
2101 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002102 /* Wake events only supported on port A for dual fiber
2103 * regardless of eeprom setting */
2104 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2105 adapter->eeprom_wol = 0;
2106 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002107 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002108 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002109 /* if quad port adapter, disable WoL on all but port A */
2110 if (global_quad_port_a != 0)
2111 adapter->eeprom_wol = 0;
2112 else
2113 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2114 /* Reset for multiple quad port adapters */
2115 if (++global_quad_port_a == 4)
2116 global_quad_port_a = 0;
2117 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002118 }
2119
2120 /* initialize the wol settings based on the eeprom settings */
2121 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002122 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002123
2124 /* reset the hardware with the new settings */
2125 igb_reset(adapter);
2126
2127 /* let the f/w know that the h/w is now under the control of the
2128 * driver. */
2129 igb_get_hw_control(adapter);
2130
Auke Kok9d5c8242008-01-24 02:22:38 -08002131 strcpy(netdev->name, "eth%d");
2132 err = register_netdev(netdev);
2133 if (err)
2134 goto err_register;
2135
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002136 igb_vlan_mode(netdev, netdev->features);
2137
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002138 /* carrier off reporting is important to ethtool even BEFORE open */
2139 netif_carrier_off(netdev);
2140
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002141#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002142 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002143 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002144 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002145 igb_setup_dca(adapter);
2146 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002147
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002148#endif
Anders Berggren673b8b72011-02-04 07:32:32 +00002149 /* do hw tstamp init after resetting */
2150 igb_init_hw_timer(adapter);
2151
Auke Kok9d5c8242008-01-24 02:22:38 -08002152 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2153 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002154 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002155 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002156 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002157 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002158 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002159 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2160 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2161 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2162 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002163 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002164
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002165 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2166 if (ret_val)
2167 strcpy(part_str, "Unknown");
2168 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002169 dev_info(&pdev->dev,
2170 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2171 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002172 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002173 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002174 switch (hw->mac.type) {
2175 case e1000_i350:
2176 igb_set_eee_i350(hw);
2177 break;
2178 default:
2179 break;
2180 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002181 return 0;
2182
2183err_register:
2184 igb_release_hw_control(adapter);
2185err_eeprom:
2186 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002187 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002188
2189 if (hw->flash_address)
2190 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002191err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002192 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002193 iounmap(hw->hw_addr);
2194err_ioremap:
2195 free_netdev(netdev);
2196err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002197 pci_release_selected_regions(pdev,
2198 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002199err_pci_reg:
2200err_dma:
2201 pci_disable_device(pdev);
2202 return err;
2203}
2204
2205/**
2206 * igb_remove - Device Removal Routine
2207 * @pdev: PCI device information struct
2208 *
2209 * igb_remove is called by the PCI subsystem to alert the driver
2210 * that it should release a PCI device. The could be caused by a
 * that it should release a PCI device. This could be caused by a
2212 * memory.
2213 **/
2214static void __devexit igb_remove(struct pci_dev *pdev)
2215{
2216 struct net_device *netdev = pci_get_drvdata(pdev);
2217 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002218 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002219
Tejun Heo760141a2010-12-12 16:45:14 +01002220 /*
2221 * The watchdog timer may be rescheduled, so explicitly
2222 * disable watchdog from being rescheduled.
2223 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002224 set_bit(__IGB_DOWN, &adapter->state);
2225 del_timer_sync(&adapter->watchdog_timer);
2226 del_timer_sync(&adapter->phy_info_timer);
2227
Tejun Heo760141a2010-12-12 16:45:14 +01002228 cancel_work_sync(&adapter->reset_task);
2229 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002230
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002231#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002232 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002233 dev_info(&pdev->dev, "DCA disabled\n");
2234 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002235 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002236 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002237 }
2238#endif
2239
Auke Kok9d5c8242008-01-24 02:22:38 -08002240 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2241 * would have already happened in close and is redundant. */
2242 igb_release_hw_control(adapter);
2243
2244 unregister_netdev(netdev);
2245
Alexander Duyck047e0032009-10-27 15:49:27 +00002246 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002247
Alexander Duyck37680112009-02-19 20:40:30 -08002248#ifdef CONFIG_PCI_IOV
2249 /* reclaim resources allocated to VFs */
2250 if (adapter->vf_data) {
2251 /* disable iov and allow time for transactions to clear */
2252 pci_disable_sriov(pdev);
2253 msleep(500);
2254
2255 kfree(adapter->vf_data);
2256 adapter->vf_data = NULL;
2257 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002258 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002259 msleep(100);
2260 dev_info(&pdev->dev, "IOV Disabled\n");
2261 }
2262#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002263
Alexander Duyck28b07592009-02-06 23:20:31 +00002264 iounmap(hw->hw_addr);
2265 if (hw->flash_address)
2266 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002267 pci_release_selected_regions(pdev,
2268 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002269
2270 free_netdev(netdev);
2271
Frans Pop19d5afd2009-10-02 10:04:12 -07002272 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002273
Auke Kok9d5c8242008-01-24 02:22:38 -08002274 pci_disable_device(pdev);
2275}
2276
2277/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002278 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2279 * @adapter: board private structure to initialize
2280 *
2281 * This function initializes the vf specific data storage and then attempts to
2282 * allocate the VFs. The reason for ordering it this way is because it is much
 * allocate the VFs. The reason for ordering it this way is that it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2285 **/
2286static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2287{
2288#ifdef CONFIG_PCI_IOV
2289 struct pci_dev *pdev = adapter->pdev;
2290
Alexander Duycka6b623e2009-10-27 23:47:53 +00002291 if (adapter->vfs_allocated_count) {
2292 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2293 sizeof(struct vf_data_storage),
2294 GFP_KERNEL);
2295 /* if allocation failed then we do not support SR-IOV */
2296 if (!adapter->vf_data) {
2297 adapter->vfs_allocated_count = 0;
2298 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2299 "Data Storage\n");
2300 }
2301 }
2302
2303 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
2304 kfree(adapter->vf_data);
2305 adapter->vf_data = NULL;
2306#endif /* CONFIG_PCI_IOV */
2307 adapter->vfs_allocated_count = 0;
2308#ifdef CONFIG_PCI_IOV
2309 } else {
2310 unsigned char mac_addr[ETH_ALEN];
2311 int i;
2312 dev_info(&pdev->dev, "%d vfs allocated\n",
2313 adapter->vfs_allocated_count);
2314 for (i = 0; i < adapter->vfs_allocated_count; i++) {
2315 random_ether_addr(mac_addr);
2316 igb_set_vf_mac(adapter, i, mac_addr);
2317 }
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002318 /* DMA Coalescing is not supported in IOV mode. */
2319 if (adapter->flags & IGB_FLAG_DMAC)
2320 adapter->flags &= ~IGB_FLAG_DMAC;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002321 }
2322#endif /* CONFIG_PCI_IOV */
2323}
2324
Alexander Duyck115f4592009-11-12 18:37:00 +00002325
2326/**
2327 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2328 * @adapter: board private structure to initialize
2329 *
2330 * igb_init_hw_timer initializes the function pointer and values for the hw
2331 * timer found in hardware.
2332 **/
2333static void igb_init_hw_timer(struct igb_adapter *adapter)
2334{
2335 struct e1000_hw *hw = &adapter->hw;
2336
2337 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002338 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002339 case e1000_82580:
2340 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2341 adapter->cycles.read = igb_read_clock;
2342 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2343 adapter->cycles.mult = 1;
2344 /*
		 * The 82580 timesync advances the system timer by 8ns every
		 * 8ns, and the value cannot be shifted. Instead we need to shift
2347 * the registers to generate a 64bit timer value. As a result
2348 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2349 * 24 in order to generate a larger value for synchronization.
2350 */
2351 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2352 /* disable system timer temporarily by setting bit 31 */
2353 wr32(E1000_TSAUXC, 0x80000000);
2354 wrfl();
2355
2356 /* Set registers so that rollover occurs soon to test this. */
2357 wr32(E1000_SYSTIMR, 0x00000000);
2358 wr32(E1000_SYSTIML, 0x80000000);
2359 wr32(E1000_SYSTIMH, 0x000000FF);
2360 wrfl();
2361
2362 /* enable system timer by clearing bit 31 */
2363 wr32(E1000_TSAUXC, 0x0);
2364 wrfl();
2365
2366 timecounter_init(&adapter->clock,
2367 &adapter->cycles,
2368 ktime_to_ns(ktime_get_real()));
2369 /*
2370 * Synchronize our NIC clock against system wall clock. NIC
2371 * time stamp reading requires ~3us per sample, each sample
2372 * was pretty stable even under load => only require 10
2373 * samples for each offset comparison.
2374 */
2375 memset(&adapter->compare, 0, sizeof(adapter->compare));
2376 adapter->compare.source = &adapter->clock;
2377 adapter->compare.target = ktime_get_real;
2378 adapter->compare.num_samples = 10;
2379 timecompare_update(&adapter->compare, 0);
2380 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00002381 case e1000_82576:
2382 /*
2383 * Initialize hardware timer: we keep it running just in case
2384 * that some program needs it later on.
2385 */
2386 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2387 adapter->cycles.read = igb_read_clock;
2388 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2389 adapter->cycles.mult = 1;
		/*
2391 * Scale the NIC clock cycle by a large factor so that
2392 * relatively small clock corrections can be added or
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002393 * subtracted at each clock tick. The drawbacks of a large
Alexander Duyck115f4592009-11-12 18:37:00 +00002394 * factor are a) that the clock register overflows more quickly
2395 * (not such a big deal) and b) that the increment per tick has
2396 * to fit into 24 bits. As a result we need to use a shift of
2397 * 19 so we can fit a value of 16 into the TIMINCA register.
2398 */
2399 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2400 wr32(E1000_TIMINCA,
2401 (1 << E1000_TIMINCA_16NS_SHIFT) |
2402 (16 << IGB_82576_TSYNC_SHIFT));
2403
2404 /* Set registers so that rollover occurs soon to test this. */
2405 wr32(E1000_SYSTIML, 0x00000000);
2406 wr32(E1000_SYSTIMH, 0xFF800000);
2407 wrfl();
2408
2409 timecounter_init(&adapter->clock,
2410 &adapter->cycles,
2411 ktime_to_ns(ktime_get_real()));
2412 /*
2413 * Synchronize our NIC clock against system wall clock. NIC
2414 * time stamp reading requires ~3us per sample, each sample
2415 * was pretty stable even under load => only require 10
2416 * samples for each offset comparison.
2417 */
2418 memset(&adapter->compare, 0, sizeof(adapter->compare));
2419 adapter->compare.source = &adapter->clock;
2420 adapter->compare.target = ktime_get_real;
2421 adapter->compare.num_samples = 10;
2422 timecompare_update(&adapter->compare, 0);
2423 break;
2424 case e1000_82575:
2425 /* 82575 does not support timesync */
2426 default:
2427 break;
2428 }
2429
2430}
2431
Alexander Duycka6b623e2009-10-27 23:47:53 +00002432/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002433 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2434 * @adapter: board private structure to initialize
2435 *
2436 * igb_sw_init initializes the Adapter private data structure.
2437 * Fields are initialized based on PCI device information and
2438 * OS network device settings (MTU size).
2439 **/
2440static int __devinit igb_sw_init(struct igb_adapter *adapter)
2441{
2442 struct e1000_hw *hw = &adapter->hw;
2443 struct net_device *netdev = adapter->netdev;
2444 struct pci_dev *pdev = adapter->pdev;
2445
2446 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2447
Alexander Duyck13fde972011-10-05 13:35:24 +00002448 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002449 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2450 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002451
2452 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002453 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2454 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2455
Alexander Duyck13fde972011-10-05 13:35:24 +00002456 /* set default work limits */
2457 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2458
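	/* max frame = MTU + Ethernet header + FCS + one VLAN tag */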
Alexander Duyck153285f2011-08-26 07:43:32 +00002459 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2460 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002461 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2462
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002463 adapter->node = -1;
2464
Eric Dumazet12dcd862010-10-15 17:27:10 +00002465 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002466#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002467 switch (hw->mac.type) {
2468 case e1000_82576:
2469 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002470 if (max_vfs > 7) {
2471 dev_warn(&pdev->dev,
2472 "Maximum of 7 VFs per PF, using max\n");
2473 adapter->vfs_allocated_count = 7;
2474 } else
2475 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002476 break;
2477 default:
2478 break;
2479 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002480#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00002481 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
Williams, Mitch A665c8c82011-06-07 14:22:57 -07002482 /* i350 cannot do RSS and SR-IOV at the same time */
2483 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2484 adapter->rss_queues = 1;
Alexander Duycka99955f2009-11-12 18:37:19 +00002485
2486 /*
2487 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
2488 * then we should combine the queues into a queue pair in order to
2489 * conserve interrupts due to limited supply
2490 */
2491 if ((adapter->rss_queues > 4) ||
2492 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2493 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2494
Alexander Duycka6b623e2009-10-27 23:47:53 +00002495 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002496 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002497 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2498 return -ENOMEM;
2499 }
2500
Alexander Duycka6b623e2009-10-27 23:47:53 +00002501 igb_probe_vfs(adapter);
2502
Auke Kok9d5c8242008-01-24 02:22:38 -08002503 /* Explicitly disable IRQ since the NIC can be in any state. */
2504 igb_irq_disable(adapter);
2505
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002506 if (hw->mac.type == e1000_i350)
2507 adapter->flags &= ~IGB_FLAG_DMAC;
2508
Auke Kok9d5c8242008-01-24 02:22:38 -08002509 set_bit(__IGB_DOWN, &adapter->state);
2510 return 0;
2511}
2512
2513/**
2514 * igb_open - Called when a network interface is made active
2515 * @netdev: network interface device structure
2516 *
2517 * Returns 0 on success, negative value on failure
2518 *
2519 * The open entry point is called when a network interface is made
2520 * active by the system (IFF_UP). At this point all resources needed
2521 * for transmit and receive operations are allocated, the interrupt
2522 * handler is registered with the OS, the watchdog timer is started,
2523 * and the stack is notified that the interface is ready.
2524 **/
2525static int igb_open(struct net_device *netdev)
2526{
2527 struct igb_adapter *adapter = netdev_priv(netdev);
2528 struct e1000_hw *hw = &adapter->hw;
2529 int err;
2530 int i;
2531
2532 /* disallow open during test */
2533 if (test_bit(__IGB_TESTING, &adapter->state))
2534 return -EBUSY;
2535
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002536 netif_carrier_off(netdev);
2537
Auke Kok9d5c8242008-01-24 02:22:38 -08002538 /* allocate transmit descriptors */
2539 err = igb_setup_all_tx_resources(adapter);
2540 if (err)
2541 goto err_setup_tx;
2542
2543 /* allocate receive descriptors */
2544 err = igb_setup_all_rx_resources(adapter);
2545 if (err)
2546 goto err_setup_rx;
2547
Nick Nunley88a268c2010-02-17 01:01:59 +00002548 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002549
Auke Kok9d5c8242008-01-24 02:22:38 -08002550 /* before we allocate an interrupt, we must be ready to handle it.
2551 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to set up our
	 * clean_rx handler before we do so. */
2554 igb_configure(adapter);
2555
2556 err = igb_request_irq(adapter);
2557 if (err)
2558 goto err_req_irq;
2559
2560 /* From here on the code is the same as igb_up() */
2561 clear_bit(__IGB_DOWN, &adapter->state);
2562
Alexander Duyck047e0032009-10-27 15:49:27 +00002563 for (i = 0; i < adapter->num_q_vectors; i++) {
2564 struct igb_q_vector *q_vector = adapter->q_vector[i];
2565 napi_enable(&q_vector->napi);
2566 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002567
2568 /* Clear any pending interrupts. */
2569 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002570
2571 igb_irq_enable(adapter);
2572
Alexander Duyckd4960302009-10-27 15:53:45 +00002573 /* notify VFs that reset has been completed */
2574 if (adapter->vfs_allocated_count) {
2575 u32 reg_data = rd32(E1000_CTRL_EXT);
2576 reg_data |= E1000_CTRL_EXT_PFRSTD;
2577 wr32(E1000_CTRL_EXT, reg_data);
2578 }
2579
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002580 netif_tx_start_all_queues(netdev);
2581
Alexander Duyck25568a52009-10-27 23:49:59 +00002582 /* start the watchdog. */
2583 hw->mac.get_link_status = 1;
2584 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002585
2586 return 0;
2587
2588err_req_irq:
2589 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002590 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002591 igb_free_all_rx_resources(adapter);
2592err_setup_rx:
2593 igb_free_all_tx_resources(adapter);
2594err_setup_tx:
2595 igb_reset(adapter);
2596
2597 return err;
2598}
2599
2600/**
2601 * igb_close - Disables a network interface
2602 * @netdev: network interface device structure
2603 *
2604 * Returns 0, this is not allowed to fail
2605 *
 2606 * The close entry point is called when an interface is deactivated
2607 * by the OS. The hardware is still under the driver's control, but
2608 * needs to be disabled. A global MAC reset is issued to stop the
2609 * hardware, and all transmit and receive resources are freed.
2610 **/
2611static int igb_close(struct net_device *netdev)
2612{
2613 struct igb_adapter *adapter = netdev_priv(netdev);
2614
2615 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2616 igb_down(adapter);
2617
2618 igb_free_irq(adapter);
2619
2620 igb_free_all_tx_resources(adapter);
2621 igb_free_all_rx_resources(adapter);
2622
Auke Kok9d5c8242008-01-24 02:22:38 -08002623 return 0;
2624}
2625
2626/**
2627 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002628 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2629 *
2630 * Return 0 on success, negative on failure
2631 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002632int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002633{
Alexander Duyck59d71982010-04-27 13:09:25 +00002634 struct device *dev = tx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002635 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002636 int size;
2637
Alexander Duyck06034642011-08-26 07:44:22 +00002638 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002639 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2640 if (!tx_ring->tx_buffer_info)
2641 tx_ring->tx_buffer_info = vzalloc(size);
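	/* node-local allocation is only a preference; fall back to
	 * memory from any node rather than failing the ring setup */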
Alexander Duyck06034642011-08-26 07:44:22 +00002642 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002643 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002644
2645 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002646 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002647 tx_ring->size = ALIGN(tx_ring->size, 4096);
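	/* e.g. the default 256-entry ring at 16 bytes per advanced
	 * descriptor is exactly 4096 bytes, so ALIGN() leaves it
	 * unchanged; an 80-entry ring (1280 bytes) would be padded
	 * up to a full 4K page */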
2648
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002649 set_dev_node(dev, tx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002650 tx_ring->desc = dma_alloc_coherent(dev,
2651 tx_ring->size,
2652 &tx_ring->dma,
2653 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002654 set_dev_node(dev, orig_node);
2655 if (!tx_ring->desc)
2656 tx_ring->desc = dma_alloc_coherent(dev,
2657 tx_ring->size,
2658 &tx_ring->dma,
2659 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002660
2661 if (!tx_ring->desc)
2662 goto err;
2663
Auke Kok9d5c8242008-01-24 02:22:38 -08002664 tx_ring->next_to_use = 0;
2665 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002666
Auke Kok9d5c8242008-01-24 02:22:38 -08002667 return 0;
2668
2669err:
Alexander Duyck06034642011-08-26 07:44:22 +00002670 vfree(tx_ring->tx_buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002671 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002672 "Unable to allocate memory for the transmit descriptor ring\n");
2673 return -ENOMEM;
2674}
2675
2676/**
2677 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2678 * (Descriptors) for all queues
2679 * @adapter: board private structure
2680 *
2681 * Return 0 on success, negative on failure
2682 **/
2683static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2684{
Alexander Duyck439705e2009-10-27 23:49:20 +00002685 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002686 int i, err = 0;
2687
2688 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002689 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002690 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002691 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002692 "Allocation for Tx Queue %u failed\n", i);
2693 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002694 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002695 break;
2696 }
2697 }
2698
2699 return err;
2700}
2701
2702/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002703 * igb_setup_tctl - configure the transmit control registers
2704 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002705 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002706void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002707{
Auke Kok9d5c8242008-01-24 02:22:38 -08002708 struct e1000_hw *hw = &adapter->hw;
2709 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002710
Alexander Duyck85b430b2009-10-27 15:50:29 +00002711 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2712 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002713
2714 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002715 tctl = rd32(E1000_TCTL);
2716 tctl &= ~E1000_TCTL_CT;
2717 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2718 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2719
2720 igb_config_collision_dist(hw);
2721
Auke Kok9d5c8242008-01-24 02:22:38 -08002722 /* Enable transmits */
2723 tctl |= E1000_TCTL_EN;
2724
2725 wr32(E1000_TCTL, tctl);
2726}
2727
2728/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002729 * igb_configure_tx_ring - Configure transmit ring after Reset
2730 * @adapter: board private structure
2731 * @ring: tx ring to configure
2732 *
2733 * Configure a transmit ring after a reset.
2734 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002735void igb_configure_tx_ring(struct igb_adapter *adapter,
2736 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002737{
2738 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002739 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002740 u64 tdba = ring->dma;
2741 int reg_idx = ring->reg_idx;
2742
2743 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002744 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002745 wrfl();
2746 mdelay(10);
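	/* the brief delay gives the hardware time to finish any
	 * in-flight descriptor fetches for this queue before its
	 * base and length registers are rewritten below */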
2747
2748 wr32(E1000_TDLEN(reg_idx),
2749 ring->count * sizeof(union e1000_adv_tx_desc));
2750 wr32(E1000_TDBAL(reg_idx),
2751 tdba & 0x00000000ffffffffULL);
2752 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2753
Alexander Duyckfce99e32009-10-27 15:51:27 +00002754 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002755 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002756 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002757
2758 txdctl |= IGB_TX_PTHRESH;
2759 txdctl |= IGB_TX_HTHRESH << 8;
2760 txdctl |= IGB_TX_WTHRESH << 16;
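	/* the three thresholds share one register: prefetch (bits 5:0),
	 * host (bits 13:8) and write-back (bits 21:16), matching the
	 * shifts above */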
2761
2762 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2763 wr32(E1000_TXDCTL(reg_idx), txdctl);
2764}
2765
2766/**
2767 * igb_configure_tx - Configure transmit Unit after Reset
2768 * @adapter: board private structure
2769 *
2770 * Configure the Tx unit of the MAC after a reset.
2771 **/
2772static void igb_configure_tx(struct igb_adapter *adapter)
2773{
2774 int i;
2775
2776 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002777 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002778}
2779
2780/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002781 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002782 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2783 *
2784 * Returns 0 on success, negative on failure
2785 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002786int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002787{
Alexander Duyck59d71982010-04-27 13:09:25 +00002788 struct device *dev = rx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002789 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002790 int size, desc_len;
2791
Alexander Duyck06034642011-08-26 07:44:22 +00002792 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002793 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2794 if (!rx_ring->rx_buffer_info)
2795 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002796 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002797 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002798
2799 desc_len = sizeof(union e1000_adv_rx_desc);
2800
2801 /* Round up to nearest 4K */
2802 rx_ring->size = rx_ring->count * desc_len;
2803 rx_ring->size = ALIGN(rx_ring->size, 4096);
2804
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002805 set_dev_node(dev, rx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002806 rx_ring->desc = dma_alloc_coherent(dev,
2807 rx_ring->size,
2808 &rx_ring->dma,
2809 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002810 set_dev_node(dev, orig_node);
2811 if (!rx_ring->desc)
2812 rx_ring->desc = dma_alloc_coherent(dev,
2813 rx_ring->size,
2814 &rx_ring->dma,
2815 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002816
2817 if (!rx_ring->desc)
2818 goto err;
2819
2820 rx_ring->next_to_clean = 0;
2821 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002822
Auke Kok9d5c8242008-01-24 02:22:38 -08002823 return 0;
2824
2825err:
Alexander Duyck06034642011-08-26 07:44:22 +00002826 vfree(rx_ring->rx_buffer_info);
2827 rx_ring->rx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002828	dev_err(dev, "Unable to allocate memory for the receive descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002830 return -ENOMEM;
2831}
2832
2833/**
2834 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2835 * (Descriptors) for all queues
2836 * @adapter: board private structure
2837 *
2838 * Return 0 on success, negative on failure
2839 **/
2840static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2841{
Alexander Duyck439705e2009-10-27 23:49:20 +00002842 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002843 int i, err = 0;
2844
2845 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002846 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002847 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002848 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002849 "Allocation for Rx Queue %u failed\n", i);
2850 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002851 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002852 break;
2853 }
2854 }
2855
2856 return err;
2857}
2858
2859/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002860 * igb_setup_mrqc - configure the multiple receive queue control registers
2861 * @adapter: Board private structure
2862 **/
2863static void igb_setup_mrqc(struct igb_adapter *adapter)
2864{
2865 struct e1000_hw *hw = &adapter->hw;
2866 u32 mrqc, rxcsum;
2867 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2868 union e1000_reta {
2869 u32 dword;
2870 u8 bytes[4];
2871 } reta;
2872 static const u8 rsshash[40] = {
2873 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2874 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2875 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2876 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2877
2878 /* Fill out hash function seeds */
2879 for (j = 0; j < 10; j++) {
2880 u32 rsskey = rsshash[(j * 4)];
2881 rsskey |= rsshash[(j * 4) + 1] << 8;
2882 rsskey |= rsshash[(j * 4) + 2] << 16;
2883 rsskey |= rsshash[(j * 4) + 3] << 24;
2884 array_wr32(E1000_RSSRK(0), j, rsskey);
2885 }
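	/* for example, the first write above packs rsshash[0..3] as
	 * 0x6d | 0x5a << 8 | 0x56 << 16 | 0xda << 24 = 0xda565a6d,
	 * i.e. key bytes land in little-endian order within each
	 * 32-bit RSSRK register */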
2886
Alexander Duycka99955f2009-11-12 18:37:19 +00002887 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002888
2889 if (adapter->vfs_allocated_count) {
2890 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2891 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002892 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002893 case e1000_82580:
2894 num_rx_queues = 1;
2895 shift = 0;
2896 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002897 case e1000_82576:
2898 shift = 3;
2899 num_rx_queues = 2;
2900 break;
2901 case e1000_82575:
2902 shift = 2;
2903 shift2 = 6;
2904 default:
2905 break;
2906 }
2907 } else {
2908 if (hw->mac.type == e1000_82575)
2909 shift = 6;
2910 }
2911
2912 for (j = 0; j < (32 * 4); j++) {
2913 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2914 if (shift2)
2915 reta.bytes[j & 3] |= num_rx_queues << shift2;
2916 if ((j & 3) == 3)
2917 wr32(E1000_RETA(j >> 2), reta.dword);
2918 }
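	/* as a worked example: on an 82576 with VFs enabled
	 * (num_rx_queues = 2, shift = 3), the 128 redirection entries
	 * alternate between 0x00 and 0x08, spreading RSS hash results
	 * evenly across the pool's two Rx queues */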
2919
2920 /*
2921 * Disable raw packet checksumming so that RSS hash is placed in
2922 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2923 * offloads as they are enabled by default
2924 */
2925 rxcsum = rd32(E1000_RXCSUM);
2926 rxcsum |= E1000_RXCSUM_PCSD;
2927
2928 if (adapter->hw.mac.type >= e1000_82576)
2929 /* Enable Receive Checksum Offload for SCTP */
2930 rxcsum |= E1000_RXCSUM_CRCOFL;
2931
2932 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2933 wr32(E1000_RXCSUM, rxcsum);
2934
2935 /* If VMDq is enabled then we set the appropriate mode for that, else
2936 * we default to RSS so that an RSS hash is calculated per packet even
2937 * if we are only using one queue */
2938 if (adapter->vfs_allocated_count) {
2939 if (hw->mac.type > e1000_82575) {
2940 /* Set the default pool for the PF's first queue */
2941 u32 vtctl = rd32(E1000_VT_CTL);
2942 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2943 E1000_VT_CTL_DISABLE_DEF_POOL);
2944 vtctl |= adapter->vfs_allocated_count <<
2945 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2946 wr32(E1000_VT_CTL, vtctl);
2947 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002948 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002949 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2950 else
2951 mrqc = E1000_MRQC_ENABLE_VMDQ;
2952 } else {
2953 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2954 }
2955 igb_vmm_control(adapter);
2956
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002957 /*
2958 * Generate RSS hash based on TCP port numbers and/or
2959 * IPv4/v6 src and dst addresses since UDP cannot be
2960 * hashed reliably due to IP fragmentation
2961 */
2962 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2963 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2964 E1000_MRQC_RSS_FIELD_IPV6 |
2965 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2966 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002967
2968 wr32(E1000_MRQC, mrqc);
2969}
2970
2971/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002972 * igb_setup_rctl - configure the receive control registers
2973 * @adapter: Board private structure
2974 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002975void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002976{
2977 struct e1000_hw *hw = &adapter->hw;
2978 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002979
2980 rctl = rd32(E1000_RCTL);
2981
2982 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002983 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002984
Alexander Duyck69d728b2008-11-25 01:04:03 -08002985 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002986 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002987
Auke Kok87cb7e82008-07-08 15:08:29 -07002988 /*
2989 * enable stripping of CRC. It's unlikely this will break BMC
2990 * redirection as it did with e1000. Newer features require
2991 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002992 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002993 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002994
Alexander Duyck559e9c42009-10-27 23:52:50 +00002995 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002996 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002997
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002998 /* enable LPE to prevent packets larger than max_frame_size */
2999 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08003000
Alexander Duyck952f72a2009-10-27 15:51:07 +00003001 /* disable queue 0 to prevent tail write w/o re-config */
3002 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08003003
Alexander Duycke1739522009-02-19 20:39:44 -08003004 /* Attention!!! For SR-IOV PF driver operations you must enable
3005 * queue drop for all VF and PF queues to prevent head of line blocking
3006 * if an un-trusted VF does not provide descriptors to hardware.
3007 */
3008 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08003009 /* set all queue drop enable bits */
3010 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08003011 }
3012
Auke Kok9d5c8242008-01-24 02:22:38 -08003013 wr32(E1000_RCTL, rctl);
3014}
3015
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003016static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3017 int vfn)
3018{
3019 struct e1000_hw *hw = &adapter->hw;
3020 u32 vmolr;
3021
 3022	/* if it isn't the PF, check to see if VFs are enabled and
 3023	 * increase the size to support VLAN tags */
3024 if (vfn < adapter->vfs_allocated_count &&
3025 adapter->vf_data[vfn].vlans_enabled)
3026 size += VLAN_TAG_SIZE;
3027
3028 vmolr = rd32(E1000_VMOLR(vfn));
3029 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3030 vmolr |= size | E1000_VMOLR_LPE;
3031 wr32(E1000_VMOLR(vfn), vmolr);
3032
3033 return 0;
3034}
3035
Auke Kok9d5c8242008-01-24 02:22:38 -08003036/**
Alexander Duycke1739522009-02-19 20:39:44 -08003037 * igb_rlpml_set - set maximum receive packet size
3038 * @adapter: board private structure
3039 *
3040 * Configure maximum receivable packet size.
3041 **/
3042static void igb_rlpml_set(struct igb_adapter *adapter)
3043{
Alexander Duyck153285f2011-08-26 07:43:32 +00003044 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003045 struct e1000_hw *hw = &adapter->hw;
3046 u16 pf_id = adapter->vfs_allocated_count;
3047
Alexander Duycke1739522009-02-19 20:39:44 -08003048 if (pf_id) {
3049 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00003050 /*
3051 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3052 * to our max jumbo frame size, in case we need to enable
3053 * jumbo frames on one of the rings later.
3054 * This will not pass over-length frames into the default
3055 * queue because it's gated by the VMOLR.RLPML.
3056 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003057 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003058 }
3059
3060 wr32(E1000_RLPML, max_frame_size);
3061}
3062
Williams, Mitch A8151d292010-02-10 01:44:24 +00003063static inline void igb_set_vmolr(struct igb_adapter *adapter,
3064 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003065{
3066 struct e1000_hw *hw = &adapter->hw;
3067 u32 vmolr;
3068
3069 /*
 3070	 * This register exists only on 82576 and newer, so if we are older
 3071	 * we should exit and do nothing.
3072 */
3073 if (hw->mac.type < e1000_82576)
3074 return;
3075
3076 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003077 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3078 if (aupe)
3079 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3080 else
3081 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003082
3083 /* clear all bits that might not be set */
3084 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3085
Alexander Duycka99955f2009-11-12 18:37:19 +00003086 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003087 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3088 /*
3089 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3090 * multicast packets
3091 */
3092 if (vfn <= adapter->vfs_allocated_count)
3093 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3094
3095 wr32(E1000_VMOLR(vfn), vmolr);
3096}
3097
Alexander Duycke1739522009-02-19 20:39:44 -08003098/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003099 * igb_configure_rx_ring - Configure a receive ring after Reset
3100 * @adapter: board private structure
3101 * @ring: receive ring to be configured
3102 *
3103 * Configure the Rx unit of the MAC after a reset.
3104 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003105void igb_configure_rx_ring(struct igb_adapter *adapter,
3106 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003107{
3108 struct e1000_hw *hw = &adapter->hw;
3109 u64 rdba = ring->dma;
3110 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003111 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003112
3113 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003114 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003115
3116 /* Set DMA base address registers */
3117 wr32(E1000_RDBAL(reg_idx),
3118 rdba & 0x00000000ffffffffULL);
3119 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3120 wr32(E1000_RDLEN(reg_idx),
3121 ring->count * sizeof(union e1000_adv_rx_desc));
3122
3123 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003124 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003125 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003126 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003127
Alexander Duyck952f72a2009-10-27 15:51:07 +00003128 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003129 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003130#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003131 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003132#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003133 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003134#endif
Alexander Duyck44390ca2011-08-26 07:43:38 +00003135 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
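	/* header-split layout: BSIZEHDRSIZE above sizes a small buffer
	 * for packet headers (IGB_RX_HDR_LEN bytes) while payload data
	 * lands in the half-page buffers sized by BSIZEPKT, i.e. 2KB
	 * data buffers on systems with 4K pages */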
Nick Nunley757b77e2010-03-26 11:36:47 +00003136 if (hw->mac.type == e1000_82580)
3137 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003138 /* Only set Drop Enable if we are supporting multiple queues */
3139 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3140 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003141
3142 wr32(E1000_SRRCTL(reg_idx), srrctl);
3143
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003144 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003145 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003146
Alexander Duyck85b430b2009-10-27 15:50:29 +00003147 rxdctl |= IGB_RX_PTHRESH;
3148 rxdctl |= IGB_RX_HTHRESH << 8;
3149 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003150
3151 /* enable receive descriptor fetching */
3152 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003153 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3154}
3155
3156/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003157 * igb_configure_rx - Configure receive Unit after Reset
3158 * @adapter: board private structure
3159 *
3160 * Configure the Rx unit of the MAC after a reset.
3161 **/
3162static void igb_configure_rx(struct igb_adapter *adapter)
3163{
Hannes Eder91075842009-02-18 19:36:04 -08003164 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003165
Alexander Duyck68d480c2009-10-05 06:33:08 +00003166 /* set UTA to appropriate mode */
3167 igb_set_uta(adapter);
3168
Alexander Duyck26ad9172009-10-05 06:32:49 +00003169 /* set the correct pool for the PF default MAC address in entry 0 */
3170 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3171 adapter->vfs_allocated_count);
3172
Alexander Duyck06cf2662009-10-27 15:53:25 +00003173 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3174 * the Base and Length of the Rx Descriptor Ring */
3175 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003176 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003177}
3178
3179/**
3180 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003181 * @tx_ring: Tx descriptor ring for a specific queue
3182 *
3183 * Free all transmit software resources
3184 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003185void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003186{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003187 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003188
Alexander Duyck06034642011-08-26 07:44:22 +00003189 vfree(tx_ring->tx_buffer_info);
3190 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003191
Alexander Duyck439705e2009-10-27 23:49:20 +00003192 /* if not set, then don't free */
3193 if (!tx_ring->desc)
3194 return;
3195
Alexander Duyck59d71982010-04-27 13:09:25 +00003196 dma_free_coherent(tx_ring->dev, tx_ring->size,
3197 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003198
3199 tx_ring->desc = NULL;
3200}
3201
3202/**
3203 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3204 * @adapter: board private structure
3205 *
3206 * Free all transmit software resources
3207 **/
3208static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3209{
3210 int i;
3211
3212 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003213 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003214}
3215
Alexander Duyckebe42d12011-08-26 07:45:09 +00003216void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3217 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003218{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003219 if (tx_buffer->skb) {
3220 dev_kfree_skb_any(tx_buffer->skb);
3221 if (tx_buffer->dma)
3222 dma_unmap_single(ring->dev,
3223 tx_buffer->dma,
3224 tx_buffer->length,
3225 DMA_TO_DEVICE);
3226 } else if (tx_buffer->dma) {
3227 dma_unmap_page(ring->dev,
3228 tx_buffer->dma,
3229 tx_buffer->length,
3230 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003231 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003232 tx_buffer->next_to_watch = NULL;
3233 tx_buffer->skb = NULL;
3234 tx_buffer->dma = 0;
3235 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003236}
3237
3238/**
3239 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003240 * @tx_ring: ring to be cleaned
3241 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003242static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003243{
Alexander Duyck06034642011-08-26 07:44:22 +00003244 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003245 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003246 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003247
Alexander Duyck06034642011-08-26 07:44:22 +00003248 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003249 return;
3250 /* Free all the Tx ring sk_buffs */
3251
3252 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003253 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003254 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003255 }
3256
Alexander Duyck06034642011-08-26 07:44:22 +00003257 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3258 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003259
3260 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003261 memset(tx_ring->desc, 0, tx_ring->size);
3262
3263 tx_ring->next_to_use = 0;
3264 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003265}
3266
3267/**
3268 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3269 * @adapter: board private structure
3270 **/
3271static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3272{
3273 int i;
3274
3275 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003276 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003277}
3278
3279/**
3280 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003281 * @rx_ring: ring to clean the resources from
3282 *
3283 * Free all receive software resources
3284 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003285void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003286{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003287 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003288
Alexander Duyck06034642011-08-26 07:44:22 +00003289 vfree(rx_ring->rx_buffer_info);
3290 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003291
Alexander Duyck439705e2009-10-27 23:49:20 +00003292 /* if not set, then don't free */
3293 if (!rx_ring->desc)
3294 return;
3295
Alexander Duyck59d71982010-04-27 13:09:25 +00003296 dma_free_coherent(rx_ring->dev, rx_ring->size,
3297 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003298
3299 rx_ring->desc = NULL;
3300}
3301
3302/**
3303 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3304 * @adapter: board private structure
3305 *
3306 * Free all receive software resources
3307 **/
3308static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3309{
3310 int i;
3311
3312 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003313 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003314}
3315
3316/**
3317 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003318 * @rx_ring: ring to free buffers from
3319 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003320static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003321{
Auke Kok9d5c8242008-01-24 02:22:38 -08003322 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003323 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003324
Alexander Duyck06034642011-08-26 07:44:22 +00003325 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003326 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003327
Auke Kok9d5c8242008-01-24 02:22:38 -08003328 /* Free all the Rx ring sk_buffs */
3329 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003330 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003331 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003332 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003333 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003334 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003335 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003336 buffer_info->dma = 0;
3337 }
3338
3339 if (buffer_info->skb) {
3340 dev_kfree_skb(buffer_info->skb);
3341 buffer_info->skb = NULL;
3342 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003343 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003344 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003345 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003346 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003347 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003348 buffer_info->page_dma = 0;
3349 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003350 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003351 put_page(buffer_info->page);
3352 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003353 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003354 }
3355 }
3356
Alexander Duyck06034642011-08-26 07:44:22 +00003357 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3358 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003359
3360 /* Zero out the descriptor ring */
3361 memset(rx_ring->desc, 0, rx_ring->size);
3362
3363 rx_ring->next_to_clean = 0;
3364 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003365}
3366
3367/**
3368 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3369 * @adapter: board private structure
3370 **/
3371static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3372{
3373 int i;
3374
3375 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003376 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003377}
3378
3379/**
3380 * igb_set_mac - Change the Ethernet Address of the NIC
3381 * @netdev: network interface device structure
3382 * @p: pointer to an address structure
3383 *
3384 * Returns 0 on success, negative on failure
3385 **/
3386static int igb_set_mac(struct net_device *netdev, void *p)
3387{
3388 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003389 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003390 struct sockaddr *addr = p;
3391
3392 if (!is_valid_ether_addr(addr->sa_data))
3393 return -EADDRNOTAVAIL;
3394
3395 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003396 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003397
Alexander Duyck26ad9172009-10-05 06:32:49 +00003398 /* set the correct pool for the new PF MAC address in entry 0 */
3399 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3400 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003401
Auke Kok9d5c8242008-01-24 02:22:38 -08003402 return 0;
3403}
3404
3405/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003406 * igb_write_mc_addr_list - write multicast addresses to MTA
3407 * @netdev: network interface device structure
3408 *
3409 * Writes multicast address list to the MTA hash table.
3410 * Returns: -ENOMEM on failure
3411 * 0 on no addresses written
3412 * X on writing X addresses to MTA
3413 **/
3414static int igb_write_mc_addr_list(struct net_device *netdev)
3415{
3416 struct igb_adapter *adapter = netdev_priv(netdev);
3417 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003418 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003419 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003420 int i;
3421
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003422 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003423 /* nothing to program, so clear mc list */
3424 igb_update_mc_addr_list(hw, NULL, 0);
3425 igb_restore_vf_multicasts(adapter);
3426 return 0;
3427 }
3428
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003429	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003430 if (!mta_list)
3431 return -ENOMEM;
3432
Alexander Duyck68d480c2009-10-05 06:33:08 +00003433 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003434 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003435 netdev_for_each_mc_addr(ha, netdev)
3436 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003437
Alexander Duyck68d480c2009-10-05 06:33:08 +00003438 igb_update_mc_addr_list(hw, mta_list, i);
3439 kfree(mta_list);
3440
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003441 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003442}
3443
3444/**
3445 * igb_write_uc_addr_list - write unicast addresses to RAR table
3446 * @netdev: network interface device structure
3447 *
3448 * Writes unicast address list to the RAR table.
3449 * Returns: -ENOMEM on failure/insufficient address space
3450 * 0 on no addresses written
3451 * X on writing X addresses to the RAR table
3452 **/
3453static int igb_write_uc_addr_list(struct net_device *netdev)
3454{
3455 struct igb_adapter *adapter = netdev_priv(netdev);
3456 struct e1000_hw *hw = &adapter->hw;
3457 unsigned int vfn = adapter->vfs_allocated_count;
3458 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
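	/* RAR entry 0 carries the PF default MAC and one entry is
	 * reserved per VF, so only the remaining entries can hold
	 * additional unicast filters */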
3459 int count = 0;
3460
3461 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003462 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003463 return -ENOMEM;
3464
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003465 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003466 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003467
3468 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003469 if (!rar_entries)
3470 break;
3471 igb_rar_set_qsel(adapter, ha->addr,
3472 rar_entries--,
3473 vfn);
3474 count++;
3475 }
3476 }
3477 /* write the addresses in reverse order to avoid write combining */
3478 for (; rar_entries > 0 ; rar_entries--) {
3479 wr32(E1000_RAH(rar_entries), 0);
3480 wr32(E1000_RAL(rar_entries), 0);
3481 }
3482 wrfl();
3483
3484 return count;
3485}
3486
3487/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003488 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003489 * @netdev: network interface device structure
3490 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003491 * The set_rx_mode entry point is called whenever the unicast or multicast
3492 * address lists or the network interface flags are updated. This routine is
3493 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003494 * promiscuous mode, and all-multi behavior.
3495 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003496static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003497{
3498 struct igb_adapter *adapter = netdev_priv(netdev);
3499 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003500 unsigned int vfn = adapter->vfs_allocated_count;
3501 u32 rctl, vmolr = 0;
3502 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003503
3504 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003505 rctl = rd32(E1000_RCTL);
3506
Alexander Duyck68d480c2009-10-05 06:33:08 +00003507	/* clear the affected bits */
3508 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3509
Patrick McHardy746b9f02008-07-16 20:15:45 -07003510 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003511 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003512 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003513 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003514 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003515 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003516 vmolr |= E1000_VMOLR_MPME;
3517 } else {
3518 /*
3519 * Write addresses to the MTA, if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003520 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003521 * that we can at least receive multicast traffic
3522 */
3523 count = igb_write_mc_addr_list(netdev);
3524 if (count < 0) {
3525 rctl |= E1000_RCTL_MPE;
3526 vmolr |= E1000_VMOLR_MPME;
3527 } else if (count) {
3528 vmolr |= E1000_VMOLR_ROMPE;
3529 }
3530 }
3531 /*
3532 * Write addresses to available RAR registers, if there is not
3533 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003534 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003535 */
3536 count = igb_write_uc_addr_list(netdev);
3537 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003538 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003539 vmolr |= E1000_VMOLR_ROPE;
3540 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003541 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003542 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003543 wr32(E1000_RCTL, rctl);
3544
Alexander Duyck68d480c2009-10-05 06:33:08 +00003545 /*
3546 * In order to support SR-IOV and eventually VMDq it is necessary to set
3547 * the VMOLR to enable the appropriate modes. Without this workaround
3548 * we will have issues with VLAN tag stripping not being done for frames
3549 * that are only arriving because we are the default pool
3550 */
3551 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003552 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003553
Alexander Duyck68d480c2009-10-05 06:33:08 +00003554 vmolr |= rd32(E1000_VMOLR(vfn)) &
3555 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3556 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003557 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003558}
3559
Greg Rose13800462010-11-06 02:08:26 +00003560static void igb_check_wvbr(struct igb_adapter *adapter)
3561{
3562 struct e1000_hw *hw = &adapter->hw;
3563 u32 wvbr = 0;
3564
3565 switch (hw->mac.type) {
3566 case e1000_82576:
3567 case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
3570 break;
3571 default:
3572 break;
3573 }
3574
3575 adapter->wvbr |= wvbr;
3576}
3577
3578#define IGB_STAGGERED_QUEUE_OFFSET 8
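/*
 * WVBR latches one bit per Tx queue when hardware blocks a VF frame
 * carrying a spoofed source MAC or VLAN; the second queue of a VF's
 * pair reports IGB_STAGGERED_QUEUE_OFFSET bits above the first, hence
 * the (1 << j) | (1 << (j + 8)) test below.
 */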
3579
3580static void igb_spoof_check(struct igb_adapter *adapter)
3581{
3582 int j;
3583
3584 if (!adapter->wvbr)
3585 return;
3586
 3587	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3588 if (adapter->wvbr & (1 << j) ||
3589 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3590 dev_warn(&adapter->pdev->dev,
3591 "Spoof event(s) detected on VF %d\n", j);
3592 adapter->wvbr &=
3593 ~((1 << j) |
3594 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3595 }
3596 }
3597}
3598
Auke Kok9d5c8242008-01-24 02:22:38 -08003599/* Need to wait a few seconds after link up to get diagnostic information from
3600 * the phy */
3601static void igb_update_phy_info(unsigned long data)
3602{
3603 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003604 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003605}
3606
3607/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003608 * igb_has_link - check shared code for link and determine up/down
3609 * @adapter: pointer to driver private info
3610 **/
Nick Nunley31455352010-02-17 01:01:21 +00003611bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003612{
3613 struct e1000_hw *hw = &adapter->hw;
3614 bool link_active = false;
3615 s32 ret_val = 0;
3616
3617 /* get_link_status is set on LSC (link status) interrupt or
3618 * rx sequence error interrupt. get_link_status will stay
3619 * false until the e1000_check_for_link establishes link
3620 * for copper adapters ONLY
3621 */
3622 switch (hw->phy.media_type) {
3623 case e1000_media_type_copper:
3624 if (hw->mac.get_link_status) {
3625 ret_val = hw->mac.ops.check_for_link(hw);
3626 link_active = !hw->mac.get_link_status;
3627 } else {
3628 link_active = true;
3629 }
3630 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003631 case e1000_media_type_internal_serdes:
3632 ret_val = hw->mac.ops.check_for_link(hw);
3633 link_active = hw->mac.serdes_has_link;
3634 break;
3635 default:
3636 case e1000_media_type_unknown:
3637 break;
3638 }
3639
3640 return link_active;
3641}
3642
Stefan Assmann563988d2011-04-05 04:27:15 +00003643static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3644{
3645 bool ret = false;
3646 u32 ctrl_ext, thstat;
3647
3648 /* check for thermal sensor event on i350, copper only */
3649 if (hw->mac.type == e1000_i350) {
3650 thstat = rd32(E1000_THSTAT);
3651 ctrl_ext = rd32(E1000_CTRL_EXT);
3652
3653 if ((hw->phy.media_type == e1000_media_type_copper) &&
3654 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3655 ret = !!(thstat & event);
3656 }
3657 }
3658
3659 return ret;
3660}
3661
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003662/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003663 * igb_watchdog - Timer Call-back
3664 * @data: pointer to adapter cast into an unsigned long
3665 **/
3666static void igb_watchdog(unsigned long data)
3667{
3668 struct igb_adapter *adapter = (struct igb_adapter *)data;
3669 /* Do the rest outside of interrupt context */
3670 schedule_work(&adapter->watchdog_task);
3671}
3672
3673static void igb_watchdog_task(struct work_struct *work)
3674{
3675 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003676 struct igb_adapter,
3677 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003678 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003679 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003680 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003681 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003682
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003683 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003684 if (link) {
3685 if (!netif_carrier_ok(netdev)) {
3686 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003687 hw->mac.ops.get_speed_and_duplex(hw,
3688 &adapter->link_speed,
3689 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003690
3691 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003692			/* Link status message must follow this format */
3693 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003694 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003695 netdev->name,
3696 adapter->link_speed,
3697 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003698 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003699 ((ctrl & E1000_CTRL_TFCE) &&
3700 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3701 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3702 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003703
Stefan Assmann563988d2011-04-05 04:27:15 +00003704 /* check for thermal sensor event */
3705 if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
3706 printk(KERN_INFO "igb: %s The network adapter "
3707 "link speed was downshifted "
3708 "because it overheated.\n",
3709 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003710 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003711
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003712 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003713 adapter->tx_timeout_factor = 1;
3714 switch (adapter->link_speed) {
3715 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003716 adapter->tx_timeout_factor = 14;
3717 break;
3718 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003719 /* maybe add some timeout factor ? */
3720 break;
3721 }
3722
3723 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003724
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003725 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003726 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003727
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003728 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003729 if (!test_bit(__IGB_DOWN, &adapter->state))
3730 mod_timer(&adapter->phy_info_timer,
3731 round_jiffies(jiffies + 2 * HZ));
3732 }
3733 } else {
3734 if (netif_carrier_ok(netdev)) {
3735 adapter->link_speed = 0;
3736 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003737
3738 /* check for thermal sensor event */
3739 if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
3740 printk(KERN_ERR "igb: %s The network adapter "
3741 "was stopped because it "
3742 "overheated.\n",
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003743 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003744 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003745
Alexander Duyck527d47c2008-11-27 00:21:39 -08003746			/* Link status message must follow this format */
3747 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3748 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003749 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003750
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003751 igb_ping_all_vfs(adapter);
3752
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003753 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003754 if (!test_bit(__IGB_DOWN, &adapter->state))
3755 mod_timer(&adapter->phy_info_timer,
3756 round_jiffies(jiffies + 2 * HZ));
3757 }
3758 }
3759
Eric Dumazet12dcd862010-10-15 17:27:10 +00003760 spin_lock(&adapter->stats64_lock);
3761 igb_update_stats(adapter, &adapter->stats64);
3762 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003763
Alexander Duyckdbabb062009-11-12 18:38:16 +00003764 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003765 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003766 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003767 /* We've lost link, so the controller stops DMA,
3768 * but we've got queued Tx work that's never going
3769 * to get done, so reset controller to flush Tx.
3770 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003771 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3772 adapter->tx_timeout_count++;
3773 schedule_work(&adapter->reset_task);
3774 /* return immediately since reset is imminent */
3775 return;
3776 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003777 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003778
Alexander Duyckdbabb062009-11-12 18:38:16 +00003779 /* Force detection of hung controller every watchdog period */
3780 tx_ring->detect_tx_hung = true;
3781 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003782
Auke Kok9d5c8242008-01-24 02:22:38 -08003783 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003784 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003785 u32 eics = 0;
3786 for (i = 0; i < adapter->num_q_vectors; i++) {
3787 struct igb_q_vector *q_vector = adapter->q_vector[i];
3788 eics |= q_vector->eims_value;
3789 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003790 wr32(E1000_EICS, eics);
3791 } else {
3792 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3793 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003794
Greg Rose13800462010-11-06 02:08:26 +00003795 igb_spoof_check(adapter);
3796
Auke Kok9d5c8242008-01-24 02:22:38 -08003797 /* Reset the timer */
3798 if (!test_bit(__IGB_DOWN, &adapter->state))
3799 mod_timer(&adapter->watchdog_timer,
3800 round_jiffies(jiffies + 2 * HZ));
3801}
3802
3803enum latency_range {
3804 lowest_latency = 0,
3805 low_latency = 1,
3806 bulk_latency = 2,
3807 latency_invalid = 255
3808};
3809
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003810/**
3811 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3812 *
 3813 * Stores a new ITR value based strictly on packet size.  This
3814 * algorithm is less sophisticated than that used in igb_update_itr,
3815 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003816 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003817 * were determined based on theoretical maximum wire speed and testing
3818 * data, in order to minimize response time while increasing bulk
3819 * throughput.
3820 * This functionality is controlled by the InterruptThrottleRate module
3821 * parameter (see igb_param.c)
3822 * NOTE: This function is called only when operating in a multiqueue
3823 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003824 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003825 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003826static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003827{
Alexander Duyck047e0032009-10-27 15:49:27 +00003828 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003829 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003830 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003831 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003832
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003833 /* For non-gigabit speeds, just fix the interrupt rate at 4000
 3834	 * ints/sec - an ITR value of IGB_4K_ITR.
3835 */
3836 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003837 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003838 goto set_itr_val;
3839 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003840
Alexander Duyck0ba82992011-08-26 07:45:47 +00003841 packets = q_vector->rx.total_packets;
3842 if (packets)
3843 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003844
Alexander Duyck0ba82992011-08-26 07:45:47 +00003845 packets = q_vector->tx.total_packets;
3846 if (packets)
3847 avg_wire_size = max_t(u32, avg_wire_size,
3848 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003849
3850 /* if avg_wire_size isn't set no work was done */
3851 if (!avg_wire_size)
3852 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003853
3854 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3855 avg_wire_size += 24;
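	/* i.e. 7-byte preamble + 1-byte SFD + 12-byte IFG + 4-byte FCS */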
3856
3857 /* Don't starve jumbo frames */
3858 avg_wire_size = min(avg_wire_size, 3000);
3859
3860 /* Give a little boost to mid-size frames */
3861 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3862 new_val = avg_wire_size / 3;
3863 else
3864 new_val = avg_wire_size / 2;
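	/* e.g. avg_wire_size = 900 yields 300 here instead of 450,
	 * i.e. a shorter interrupt interval for mid-size frames */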
3865
Alexander Duyck0ba82992011-08-26 07:45:47 +00003866 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3867 if (new_val < IGB_20K_ITR &&
3868 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3869 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3870 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003871
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003872set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003873 if (new_val != q_vector->itr_val) {
3874 q_vector->itr_val = new_val;
3875 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003876 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003877clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003878 q_vector->rx.total_bytes = 0;
3879 q_vector->rx.total_packets = 0;
3880 q_vector->tx.total_bytes = 0;
3881 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003882}
3883
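/*
 * For illustration, a stand-alone sketch of the heuristic above: it takes
 * the byte and packet totals accumulated since the last update and maps the
 * average wire size to a new ITR interval the same way igb_update_ring_itr()
 * does. The helper name is made up for the sketch and is not part of the
 * driver; itr_20k stands in for IGB_20K_ITR.
 */
static unsigned int itr_from_counters(unsigned int bytes, unsigned int packets,
				      unsigned int itr_20k, bool conservative)
{
	unsigned int avg_wire_size;

	if (!packets)
		return 0;	/* no work was done, leave the ITR alone */

	/* average frame plus 24 bytes of CRC, preamble, and inter-frame gap */
	avg_wire_size = bytes / packets + 24;

	/* don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000u);

	/* give a little boost to mid-size frames */
	if (avg_wire_size > 300 && avg_wire_size < 1200)
		avg_wire_size /= 3;
	else
		avg_wire_size /= 2;

	/* conservative mode (itr 3) caps the rate at 20000 ints/sec */
	if (conservative && avg_wire_size < itr_20k)
		avg_wire_size = itr_20k;

	return avg_wire_size;
}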
3884/**
3885 * igb_update_itr - update the dynamic ITR value based on statistics
3886 * Stores a new ITR value based on packets and byte
3887 * counts during the last interrupt. The advantage of per interrupt
3888 * computation is faster updates and more accurate ITR for the current
3889 * traffic pattern. Constants in this function were computed
3890 * based on theoretical maximum wire speed and thresholds were set based
3891 * on testing data as well as attempting to minimize response time
3892 * while increasing bulk throughput.
3893 * this functionality is controlled by the InterruptThrottleRate module
3894 * parameter (see igb_param.c)
3895 * NOTE: These calculations are only valid when operating in a single-
3896 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003897 * @q_vector: pointer to q_vector
3898 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003899 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003900static void igb_update_itr(struct igb_q_vector *q_vector,
3901 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003902{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003903 unsigned int packets = ring_container->total_packets;
3904 unsigned int bytes = ring_container->total_bytes;
3905 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003906
Alexander Duyck0ba82992011-08-26 07:45:47 +00003907 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003908 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003909 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003910
Alexander Duyck0ba82992011-08-26 07:45:47 +00003911 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003912 case lowest_latency:
3913 /* handle TSO and jumbo frames */
3914 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003915 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003916 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003917 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003918 break;
3919 case low_latency: /* 50 usec aka 20000 ints/s */
3920 if (bytes > 10000) {
3921 /* this if handles the TSO accounting */
3922 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003923 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003924 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003925 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003926 } else if ((packets > 35)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003927 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003928 }
3929 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003930 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003931 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003932 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003933 }
3934 break;
3935 case bulk_latency: /* 250 usec aka 4000 ints/s */
3936 if (bytes > 25000) {
3937 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003938 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003939 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003940 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003941 }
3942 break;
3943 }
3944
Alexander Duyck0ba82992011-08-26 07:45:47 +00003945 /* clear work counters since we have the values we need */
3946 ring_container->total_bytes = 0;
3947 ring_container->total_packets = 0;
3948
3949 /* write updated itr to ring container */
3950 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003951}
3952
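/*
 * Worked example of the state machine above, starting from low_latency
 * (the numbers are illustrative only):
 *
 *   bytes=26000, packets=3  -> bytes/packets > 8000 (TSO)    -> bulk_latency
 *   bytes=12000, packets=20 -> packets >= 10, ratio <= 1200  -> low_latency
 *   bytes=12000, packets=40 -> packets > 35                  -> lowest_latency
 *   bytes=400,   packets=2  -> packets <= 2 && bytes < 512   -> lowest_latency
 */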
Alexander Duyck0ba82992011-08-26 07:45:47 +00003953static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003954{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003955 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00003956 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003957 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003958
3959 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3960 if (adapter->link_speed != SPEED_1000) {
3961 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003962 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08003963 goto set_itr_now;
3964 }
3965
Alexander Duyck0ba82992011-08-26 07:45:47 +00003966 igb_update_itr(q_vector, &q_vector->tx);
3967 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08003968
Alexander Duyck0ba82992011-08-26 07:45:47 +00003969 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003970
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003971 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00003972 if (current_itr == lowest_latency &&
3973 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3974 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003975 current_itr = low_latency;
3976
Auke Kok9d5c8242008-01-24 02:22:38 -08003977 switch (current_itr) {
3978 /* counts and packets in update_itr are dependent on these numbers */
3979 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003980 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003981 break;
3982 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003983 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003984 break;
3985 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003986 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003987 break;
3988 default:
3989 break;
3990 }
3991
3992set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00003993 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003994 /* this attempts to bias the interrupt rate towards Bulk
3995 * by adding intermediate steps when interrupt rate is
3996 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003997 new_itr = new_itr > q_vector->itr_val ?
3998 max((new_itr * q_vector->itr_val) /
3999 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00004000 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08004001 new_itr;
4002 /* Don't write the value here; it resets the adapter's
4003 * internal timer, and causes us to delay far longer than
4004 * we should between interrupts. Instead, we write the ITR
4005 * value at the beginning of the next interrupt so the timing
4006 * ends up being correct.
4007 */
Alexander Duyck047e0032009-10-27 15:49:27 +00004008 q_vector->itr_val = new_itr;
4009 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004010 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004011}
4012
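/*
 * Worked example of the step formula above, taking the values this driver
 * version defines in igb.h (IGB_70K_ITR = 56, IGB_4K_ITR = 980). A jump
 * from 70K ints/sec toward 4K ints/sec computes:
 *
 *   (980 * 56) / (980 + (56 >> 2)) = 54880 / 994 = 55
 *
 * and max(55, 980) selects 980. In general the intermediate term is always
 * smaller than new_itr whenever new_itr > q_vector->itr_val, so the max()
 * resolves to new_itr and the interval still moves in a single step.
 */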
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004013void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4014 u32 type_tucmd, u32 mss_l4len_idx)
4015{
4016 struct e1000_adv_tx_context_desc *context_desc;
4017 u16 i = tx_ring->next_to_use;
4018
4019 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4020
4021 i++;
4022 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4023
4024 /* set bits to identify this as an advanced context descriptor */
4025 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4026
4027 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004028 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004029 mss_l4len_idx |= tx_ring->reg_idx << 4;
4030
4031 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4032 context_desc->seqnum_seed = 0;
4033 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4034 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4035}
4036
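/*
 * Sketch of the two 32-bit words assembled above; the bit positions follow
 * the advanced context descriptor format (shifts shown for illustration,
 * the driver itself relies on the E1000_ADVTXD_* constants):
 *
 *   vlan_macip_lens: [31:16] VLAN tag | [15:9] MAC hdr len | [8:0] IP hdr len
 *   mss_l4len_idx:   [31:16] MSS      | [15:8] L4 hdr len  | [7:4] IDX
 *
 * IDX is the per-ring context index the 82575 requires (reg_idx << 4 above).
 */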
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004037static int igb_tso(struct igb_ring *tx_ring,
4038 struct igb_tx_buffer *first,
4039 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004040{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004041 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004042 u32 vlan_macip_lens, type_tucmd;
4043 u32 mss_l4len_idx, l4len;
4044
4045 if (!skb_is_gso(skb))
4046 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004047
4048 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004049 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004050 if (err)
4051 return err;
4052 }
4053
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004054 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4055 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004056
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004057 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004058 struct iphdr *iph = ip_hdr(skb);
4059 iph->tot_len = 0;
4060 iph->check = 0;
4061 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4062 iph->daddr, 0,
4063 IPPROTO_TCP,
4064 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004065 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004066 first->tx_flags |= IGB_TX_FLAGS_TSO |
4067 IGB_TX_FLAGS_CSUM |
4068 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004069 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004070 ipv6_hdr(skb)->payload_len = 0;
4071 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4072 &ipv6_hdr(skb)->daddr,
4073 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004074 first->tx_flags |= IGB_TX_FLAGS_TSO |
4075 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004076 }
4077
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004078 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004079 l4len = tcp_hdrlen(skb);
4080 *hdr_len = skb_transport_offset(skb) + l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08004081
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004082 /* update gso size and bytecount with header size */
4083 first->gso_segs = skb_shinfo(skb)->gso_segs;
4084 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4085
Auke Kok9d5c8242008-01-24 02:22:38 -08004086 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004087 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4088 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004089
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004090 /* VLAN MACLEN IPLEN */
4091 vlan_macip_lens = skb_network_header_len(skb);
4092 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004093 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004094
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004095 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004096
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004097 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004098}
4099
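/*
 * Worked example of the gso_segs/bytecount adjustment above, with
 * illustrative numbers: a 14480-byte TCP payload with gso_size 1448 splits
 * into 10 segments. With hdr_len = 66 (14 Ethernet + 20 IP + 32 TCP with
 * timestamps), the skb carries one copy of the headers but the wire will
 * carry ten, so:
 *
 *   first->bytecount = skb->len + (gso_segs - 1) * hdr_len
 *                    = (14480 + 66) + 9 * 66 = 15140
 */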
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004100static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004101{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004102 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004103 u32 vlan_macip_lens = 0;
4104 u32 mss_l4len_idx = 0;
4105 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004106
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004107 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004108 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4109 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004110 } else {
4111 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004112 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004113 case __constant_htons(ETH_P_IP):
4114 vlan_macip_lens |= skb_network_header_len(skb);
4115 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4116 l4_hdr = ip_hdr(skb)->protocol;
4117 break;
4118 case __constant_htons(ETH_P_IPV6):
4119 vlan_macip_lens |= skb_network_header_len(skb);
4120 l4_hdr = ipv6_hdr(skb)->nexthdr;
4121 break;
4122 default:
4123 if (unlikely(net_ratelimit())) {
4124 dev_warn(tx_ring->dev,
4125 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004126 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004127 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004128 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004129 }
4130
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004131 switch (l4_hdr) {
4132 case IPPROTO_TCP:
4133 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4134 mss_l4len_idx = tcp_hdrlen(skb) <<
4135 E1000_ADVTXD_L4LEN_SHIFT;
4136 break;
4137 case IPPROTO_SCTP:
4138 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4139 mss_l4len_idx = sizeof(struct sctphdr) <<
4140 E1000_ADVTXD_L4LEN_SHIFT;
4141 break;
4142 case IPPROTO_UDP:
4143 mss_l4len_idx = sizeof(struct udphdr) <<
4144 E1000_ADVTXD_L4LEN_SHIFT;
4145 break;
4146 default:
4147 if (unlikely(net_ratelimit())) {
4148 dev_warn(tx_ring->dev,
4149 "partial checksum but l4 proto=%x!\n",
4150 l4_hdr);
4151 }
4152 break;
4153 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004154
4155 /* update TX checksum flag */
4156 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004157 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004158
4159 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004160 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004161
4162 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004163}
4164
Alexander Duycke032afc2011-08-26 07:44:48 +00004165static __le32 igb_tx_cmd_type(u32 tx_flags)
4166{
4167 /* set type for advanced descriptor with frame checksum insertion */
4168 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4169 E1000_ADVTXD_DCMD_IFCS |
4170 E1000_ADVTXD_DCMD_DEXT);
4171
4172 /* set HW vlan bit if vlan is present */
4173 if (tx_flags & IGB_TX_FLAGS_VLAN)
4174 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4175
4176 /* set timestamp bit if present */
4177 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4178 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4179
4180 /* set segmentation bits for TSO */
4181 if (tx_flags & IGB_TX_FLAGS_TSO)
4182 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4183
4184 return cmd_type;
4185}
4186
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004187static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4188 union e1000_adv_tx_desc *tx_desc,
4189 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004190{
4191 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4192
4193 /* 82575 requires a unique index per ring if any offload is enabled */
4194 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
Alexander Duyck866cff02011-08-26 07:45:36 +00004195 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004196 olinfo_status |= tx_ring->reg_idx << 4;
4197
4198 /* insert L4 checksum */
4199 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4200 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4201
4202 /* insert IPv4 checksum */
4203 if (tx_flags & IGB_TX_FLAGS_IPV4)
4204 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4205 }
4206
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004207 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004208}
4209
Alexander Duyckebe42d12011-08-26 07:45:09 +00004210/*
4211 * The largest size we can write to the descriptor is 65535. In order to
4212 * maintain a power of two alignment we have to limit ourselves to 32K.
4213 */
4214#define IGB_MAX_TXD_PWR 15
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004215#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
Auke Kok9d5c8242008-01-24 02:22:38 -08004216
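/*
 * A quick sketch of what the 32K cap costs: a mapping of len bytes consumes
 * ceil(len / IGB_MAX_DATA_PER_TXD) data descriptors, e.g. a 64KB fragment
 * needs two and a 65537-byte one needs three. (The helper name is made up
 * for the sketch; it is not part of the driver.)
 */
static inline unsigned int igb_txd_count(unsigned int len)
{
	return (len + IGB_MAX_DATA_PER_TXD - 1) / IGB_MAX_DATA_PER_TXD;
}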
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004217static void igb_tx_map(struct igb_ring *tx_ring,
4218 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004219 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004220{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004221 struct sk_buff *skb = first->skb;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004222 struct igb_tx_buffer *tx_buffer_info;
4223 union e1000_adv_tx_desc *tx_desc;
4224 dma_addr_t dma;
4225 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4226 unsigned int data_len = skb->data_len;
4227 unsigned int size = skb_headlen(skb);
4228 unsigned int paylen = skb->len - hdr_len;
4229 __le32 cmd_type;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004230 u32 tx_flags = first->tx_flags;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004231 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004232
4233 tx_desc = IGB_TX_DESC(tx_ring, i);
4234
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004235 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004236 cmd_type = igb_tx_cmd_type(tx_flags);
4237
4238 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4239 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004240 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004241
Alexander Duyckebe42d12011-08-26 07:45:09 +00004242 /* record length, and DMA address */
4243 first->length = size;
4244 first->dma = dma;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004245 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004246
Alexander Duyckebe42d12011-08-26 07:45:09 +00004247 for (;;) {
4248 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4249 tx_desc->read.cmd_type_len =
4250 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004251
Alexander Duyckebe42d12011-08-26 07:45:09 +00004252 i++;
4253 tx_desc++;
4254 if (i == tx_ring->count) {
4255 tx_desc = IGB_TX_DESC(tx_ring, 0);
4256 i = 0;
4257 }
4258
4259 dma += IGB_MAX_DATA_PER_TXD;
4260 size -= IGB_MAX_DATA_PER_TXD;
4261
4262 tx_desc->read.olinfo_status = 0;
4263 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4264 }
4265
4266 if (likely(!data_len))
4267 break;
4268
4269 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4270
Alexander Duyck65689fe2009-03-20 00:17:43 +00004271 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004272 tx_desc++;
4273 if (i == tx_ring->count) {
4274 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004275 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004276 }
Alexander Duyck65689fe2009-03-20 00:17:43 +00004277
Alexander Duyckebe42d12011-08-26 07:45:09 +00004278 size = frag->size;
4279 data_len -= size;
4280
4281 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4282 size, DMA_TO_DEVICE);
4283 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004284 goto dma_error;
4285
Alexander Duyckebe42d12011-08-26 07:45:09 +00004286 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4287 tx_buffer_info->length = size;
4288 tx_buffer_info->dma = dma;
4289
4290 tx_desc->read.olinfo_status = 0;
4291 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4292
4293 frag++;
Auke Kok9d5c8242008-01-24 02:22:38 -08004294 }
4295
Alexander Duyckebe42d12011-08-26 07:45:09 +00004296 /* write last descriptor with RS and EOP bits */
4297 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
4298 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyck8542db02011-08-26 07:44:43 +00004299
4300 /* set the timestamp */
4301 first->time_stamp = jiffies;
4302
Alexander Duyckebe42d12011-08-26 07:45:09 +00004303 /*
4304 * Force memory writes to complete before letting h/w know there
4305 * are new descriptors to fetch. (Only applicable for weak-ordered
4306 * memory model archs, such as IA-64).
4307 *
4308 * We also need this memory barrier to make certain all of the
4309 * status bits have been updated before next_to_watch is written.
4310 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004311 wmb();
4312
Alexander Duyckebe42d12011-08-26 07:45:09 +00004313 /* set next_to_watch value indicating a packet is present */
4314 first->next_to_watch = tx_desc;
4315
4316 i++;
4317 if (i == tx_ring->count)
4318 i = 0;
4319
Auke Kok9d5c8242008-01-24 02:22:38 -08004320 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004321
Alexander Duyckfce99e32009-10-27 15:51:27 +00004322 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004323
Auke Kok9d5c8242008-01-24 02:22:38 -08004324 /* we need this if more than one processor can write to our tail
4325	 * at a time; it synchronizes IO on IA64/Altix systems */
4326 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004327
4328 return;
4329
4330dma_error:
4331 dev_err(tx_ring->dev, "TX DMA map failed\n");
4332
4333 /* clear dma mappings for failed tx_buffer_info map */
4334 for (;;) {
4335 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4336 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4337 if (tx_buffer_info == first)
4338 break;
4339 if (i == 0)
4340 i = tx_ring->count;
4341 i--;
4342 }
4343
4344 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004345}
4346
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004347static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004348{
Alexander Duycke694e962009-10-27 15:53:06 +00004349 struct net_device *netdev = tx_ring->netdev;
4350
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004351 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004352
Auke Kok9d5c8242008-01-24 02:22:38 -08004353 /* Herbert's original patch had:
4354 * smp_mb__after_netif_stop_queue();
4355 * but since that doesn't exist yet, just open code it. */
4356 smp_mb();
4357
4358	/* We need to check again in case another CPU has just
4359 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004360 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004361 return -EBUSY;
4362
4363 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004364 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004365
4366 u64_stats_update_begin(&tx_ring->tx_syncp2);
4367 tx_ring->tx_stats.restart_queue2++;
4368 u64_stats_update_end(&tx_ring->tx_syncp2);
4369
Auke Kok9d5c8242008-01-24 02:22:38 -08004370 return 0;
4371}
4372
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004373static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004374{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004375 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004376 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004377 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004378}
4379
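/*
 * The stop/re-check dance above is the producer half of the usual Tx
 * stop/wake handshake; schematically (sketch, not driver code):
 *
 *   producer (xmit):                consumer (Tx cleanup):
 *     stop queue                      free descriptors
 *     smp_mb()                        smp_mb()
 *     if room now, wake queue         if stopped and room, wake queue
 *
 * Without the barriers each side could miss the other's update, leaving
 * the queue stopped even though the ring has space again.
 */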
Alexander Duyckcd392f52011-08-26 07:43:59 +00004380netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4381 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004382{
Alexander Duyck8542db02011-08-26 07:44:43 +00004383 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004384 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004385 u32 tx_flags = 0;
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004386 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004387 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004388
Auke Kok9d5c8242008-01-24 02:22:38 -08004389 /* need: 1 descriptor per page,
4390 * + 2 desc gap to keep tail from touching head,
4391 * + 1 desc for skb->data,
4392 * + 1 desc for context descriptor,
4393 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004394 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004395 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004396 return NETDEV_TX_BUSY;
4397 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004398
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004399 /* record the location of the first descriptor for this packet */
4400 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4401 first->skb = skb;
4402 first->bytecount = skb->len;
4403 first->gso_segs = 1;
4404
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004405 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4406 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004407 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004408 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004409
Jesse Grosseab6d182010-10-20 13:56:03 +00004410 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004411 tx_flags |= IGB_TX_FLAGS_VLAN;
4412 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4413 }
4414
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004415 /* record initial flags and protocol */
4416 first->tx_flags = tx_flags;
4417 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004418
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004419 tso = igb_tso(tx_ring, first, &hdr_len);
4420 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004421 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004422 else if (!tso)
4423 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004424
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004425 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004426
4427 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004428 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004429
Auke Kok9d5c8242008-01-24 02:22:38 -08004430 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004431
4432out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004433 igb_unmap_and_free_tx_resource(tx_ring, first);
4434
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004435 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004436}
4437
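/*
 * Descriptor budget from the comment above, worked out for a frame with
 * skb_shinfo(skb)->nr_frags == 3:
 *
 *   3 frag descriptors + 1 for skb->data + 1 context + 2 gap = 7
 *
 * so igb_maybe_stop_tx(tx_ring, 3 + 4) must see at least seven unused
 * entries, otherwise the queue is stopped and NETDEV_TX_BUSY returned.
 */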
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004438static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4439 struct sk_buff *skb)
4440{
4441 unsigned int r_idx = skb->queue_mapping;
4442
4443 if (r_idx >= adapter->num_tx_queues)
4444 r_idx = r_idx % adapter->num_tx_queues;
4445
4446 return adapter->tx_ring[r_idx];
4447}
4448
Alexander Duyckcd392f52011-08-26 07:43:59 +00004449static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4450 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004451{
4452 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004453
4454 if (test_bit(__IGB_DOWN, &adapter->state)) {
4455 dev_kfree_skb_any(skb);
4456 return NETDEV_TX_OK;
4457 }
4458
4459 if (skb->len <= 0) {
4460 dev_kfree_skb_any(skb);
4461 return NETDEV_TX_OK;
4462 }
4463
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004464 /*
4465	 * The minimum packet size with TCTL.PSP set is 17, so pad the skb
4466 * in order to meet this minimum size requirement.
4467 */
4468 if (skb->len < 17) {
4469 if (skb_padto(skb, 17))
4470 return NETDEV_TX_OK;
4471 skb->len = 17;
4472 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004473
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004474 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004475}
4476
4477/**
4478 * igb_tx_timeout - Respond to a Tx Hang
4479 * @netdev: network interface device structure
4480 **/
4481static void igb_tx_timeout(struct net_device *netdev)
4482{
4483 struct igb_adapter *adapter = netdev_priv(netdev);
4484 struct e1000_hw *hw = &adapter->hw;
4485
4486 /* Do the reset outside of interrupt context */
4487 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004488
Alexander Duyck55cac242009-11-19 12:42:21 +00004489 if (hw->mac.type == e1000_82580)
4490 hw->dev_spec._82575.global_device_reset = true;
4491
Auke Kok9d5c8242008-01-24 02:22:38 -08004492 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004493 wr32(E1000_EICS,
4494 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004495}
4496
4497static void igb_reset_task(struct work_struct *work)
4498{
4499 struct igb_adapter *adapter;
4500 adapter = container_of(work, struct igb_adapter, reset_task);
4501
Taku Izumic97ec422010-04-27 14:39:30 +00004502 igb_dump(adapter);
4503 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004504 igb_reinit_locked(adapter);
4505}
4506
4507/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004508 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004509 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004510 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004511 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004512 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004513static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4514 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004515{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004516 struct igb_adapter *adapter = netdev_priv(netdev);
4517
4518 spin_lock(&adapter->stats64_lock);
4519 igb_update_stats(adapter, &adapter->stats64);
4520 memcpy(stats, &adapter->stats64, sizeof(*stats));
4521 spin_unlock(&adapter->stats64_lock);
4522
4523 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004524}
4525
4526/**
4527 * igb_change_mtu - Change the Maximum Transfer Unit
4528 * @netdev: network interface device structure
4529 * @new_mtu: new value for maximum frame size
4530 *
4531 * Returns 0 on success, negative on failure
4532 **/
4533static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4534{
4535 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004536 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004537 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004538
Alexander Duyckc809d222009-10-27 23:52:13 +00004539 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004540 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004541 return -EINVAL;
4542 }
4543
Alexander Duyck153285f2011-08-26 07:43:32 +00004544#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004545 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004546 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004547 return -EINVAL;
4548 }
4549
4550 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4551 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004552
Auke Kok9d5c8242008-01-24 02:22:38 -08004553 /* igb_down has a dependency on max_frame_size */
4554 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004555
Alexander Duyck4c844852009-10-27 15:52:07 +00004556 if (netif_running(netdev))
4557 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004558
Alexander Duyck090b1792009-10-27 23:51:55 +00004559 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004560 netdev->mtu, new_mtu);
4561 netdev->mtu = new_mtu;
4562
4563 if (netif_running(netdev))
4564 igb_up(adapter);
4565 else
4566 igb_reset(adapter);
4567
4568 clear_bit(__IGB_RESETTING, &adapter->state);
4569
4570 return 0;
4571}
4572
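/*
 * max_frame arithmetic from above for a standard 1500-byte MTU
 * (ETH_HLEN 14, ETH_FCS_LEN 4, VLAN_HLEN 4):
 *
 *   max_frame = 1500 + 14 + 4 + 4 = 1522
 *
 * The 9216-byte limit quoted in the error message is simply
 * MAX_STD_JUMBO_FRAME_SIZE (9238) minus those 22 bytes of overhead.
 */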
4573/**
4574 * igb_update_stats - Update the board statistics counters
4575 * @adapter: board private structure
4576 **/
4577
Eric Dumazet12dcd862010-10-15 17:27:10 +00004578void igb_update_stats(struct igb_adapter *adapter,
4579 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004580{
4581 struct e1000_hw *hw = &adapter->hw;
4582 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004583 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004584 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004585 int i;
4586 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004587 unsigned int start;
4588 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004589
4590#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4591
4592 /*
4593	 * Prevent stats update while adapter is being reset, or if the PCI
4594 * connection is down.
4595 */
4596 if (adapter->link_speed == 0)
4597 return;
4598 if (pci_channel_offline(pdev))
4599 return;
4600
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004601 bytes = 0;
4602 packets = 0;
4603 for (i = 0; i < adapter->num_rx_queues; i++) {
4604 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00004605 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004606
Alexander Duyck3025a442010-02-17 01:02:39 +00004607 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004608 net_stats->rx_fifo_errors += rqdpc_tmp;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004609
4610 do {
4611 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4612 _bytes = ring->rx_stats.bytes;
4613 _packets = ring->rx_stats.packets;
4614 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4615 bytes += _bytes;
4616 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004617 }
4618
Alexander Duyck128e45e2009-11-12 18:37:38 +00004619 net_stats->rx_bytes = bytes;
4620 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004621
4622 bytes = 0;
4623 packets = 0;
4624 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004625 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004626 do {
4627 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4628 _bytes = ring->tx_stats.bytes;
4629 _packets = ring->tx_stats.packets;
4630 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4631 bytes += _bytes;
4632 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004633 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004634 net_stats->tx_bytes = bytes;
4635 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004636
4637 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004638 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4639 adapter->stats.gprc += rd32(E1000_GPRC);
4640 adapter->stats.gorc += rd32(E1000_GORCL);
4641 rd32(E1000_GORCH); /* clear GORCL */
4642 adapter->stats.bprc += rd32(E1000_BPRC);
4643 adapter->stats.mprc += rd32(E1000_MPRC);
4644 adapter->stats.roc += rd32(E1000_ROC);
4645
4646 adapter->stats.prc64 += rd32(E1000_PRC64);
4647 adapter->stats.prc127 += rd32(E1000_PRC127);
4648 adapter->stats.prc255 += rd32(E1000_PRC255);
4649 adapter->stats.prc511 += rd32(E1000_PRC511);
4650 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4651 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4652 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4653 adapter->stats.sec += rd32(E1000_SEC);
4654
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004655 mpc = rd32(E1000_MPC);
4656 adapter->stats.mpc += mpc;
4657 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004658 adapter->stats.scc += rd32(E1000_SCC);
4659 adapter->stats.ecol += rd32(E1000_ECOL);
4660 adapter->stats.mcc += rd32(E1000_MCC);
4661 adapter->stats.latecol += rd32(E1000_LATECOL);
4662 adapter->stats.dc += rd32(E1000_DC);
4663 adapter->stats.rlec += rd32(E1000_RLEC);
4664 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4665 adapter->stats.xontxc += rd32(E1000_XONTXC);
4666 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4667 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4668 adapter->stats.fcruc += rd32(E1000_FCRUC);
4669 adapter->stats.gptc += rd32(E1000_GPTC);
4670 adapter->stats.gotc += rd32(E1000_GOTCL);
4671 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004672 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004673 adapter->stats.ruc += rd32(E1000_RUC);
4674 adapter->stats.rfc += rd32(E1000_RFC);
4675 adapter->stats.rjc += rd32(E1000_RJC);
4676 adapter->stats.tor += rd32(E1000_TORH);
4677 adapter->stats.tot += rd32(E1000_TOTH);
4678 adapter->stats.tpr += rd32(E1000_TPR);
4679
4680 adapter->stats.ptc64 += rd32(E1000_PTC64);
4681 adapter->stats.ptc127 += rd32(E1000_PTC127);
4682 adapter->stats.ptc255 += rd32(E1000_PTC255);
4683 adapter->stats.ptc511 += rd32(E1000_PTC511);
4684 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4685 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4686
4687 adapter->stats.mptc += rd32(E1000_MPTC);
4688 adapter->stats.bptc += rd32(E1000_BPTC);
4689
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004690 adapter->stats.tpt += rd32(E1000_TPT);
4691 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004692
4693 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004694 /* read internal phy specific stats */
4695 reg = rd32(E1000_CTRL_EXT);
4696 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4697 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4698 adapter->stats.tncrs += rd32(E1000_TNCRS);
4699 }
4700
Auke Kok9d5c8242008-01-24 02:22:38 -08004701 adapter->stats.tsctc += rd32(E1000_TSCTC);
4702 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4703
4704 adapter->stats.iac += rd32(E1000_IAC);
4705 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4706 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4707 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4708 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4709 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4710 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4711 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4712 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4713
4714 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004715 net_stats->multicast = adapter->stats.mprc;
4716 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004717
4718 /* Rx Errors */
4719
4720	/* RLEC on some newer hardware can be incorrect, so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004721 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004722 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004723 adapter->stats.crcerrs + adapter->stats.algnerrc +
4724 adapter->stats.ruc + adapter->stats.roc +
4725 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004726 net_stats->rx_length_errors = adapter->stats.ruc +
4727 adapter->stats.roc;
4728 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4729 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4730 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004731
4732 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004733 net_stats->tx_errors = adapter->stats.ecol +
4734 adapter->stats.latecol;
4735 net_stats->tx_aborted_errors = adapter->stats.ecol;
4736 net_stats->tx_window_errors = adapter->stats.latecol;
4737 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004738
4739 /* Tx Dropped needs to be maintained elsewhere */
4740
4741 /* Phy Stats */
4742 if (hw->phy.media_type == e1000_media_type_copper) {
4743 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004744 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004745 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4746 adapter->phy_stats.idle_errors += phy_tmp;
4747 }
4748 }
4749
4750 /* Management Stats */
4751 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4752 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4753 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004754
4755 /* OS2BMC Stats */
4756 reg = rd32(E1000_MANC);
4757 if (reg & E1000_MANC_EN_BMC2OS) {
4758 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4759 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4760 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4761 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4762 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004763}
4764
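/*
 * The do/while loops above are the reader side of a sequence counter: the
 * writers elsewhere in the driver wrap their 64-bit updates in
 * u64_stats_update_begin()/end(), and a reader that raced with a writer
 * retries. A minimal sketch of the same idea, with the memory barriers the
 * kernel helpers provide elided for brevity:
 */
struct stats_snap {
	unsigned int seq;		/* odd while an update is in flight */
	unsigned long long bytes;
	unsigned long long packets;
};

static void snap_read(const volatile struct stats_snap *s,
		      unsigned long long *bytes, unsigned long long *packets)
{
	unsigned int start;

	do {
		start = s->seq;
		*bytes = s->bytes;
		*packets = s->packets;
	} while ((start & 1) || start != s->seq);
}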
Auke Kok9d5c8242008-01-24 02:22:38 -08004765static irqreturn_t igb_msix_other(int irq, void *data)
4766{
Alexander Duyck047e0032009-10-27 15:49:27 +00004767 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004768 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004769 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004770 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004771
Alexander Duyck7f081d42010-01-07 17:41:00 +00004772 if (icr & E1000_ICR_DRSTA)
4773 schedule_work(&adapter->reset_task);
4774
Alexander Duyck047e0032009-10-27 15:49:27 +00004775 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004776 /* HW is reporting DMA is out of sync */
4777 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004778		/* The DMA Out of Sync is also an indication of a spoof event
4779 * in IOV mode. Check the Wrong VM Behavior register to
4780 * see if it is really a spoof event. */
4781 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004782 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004783
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004784 /* Check for a mailbox event */
4785 if (icr & E1000_ICR_VMMB)
4786 igb_msg_task(adapter);
4787
4788 if (icr & E1000_ICR_LSC) {
4789 hw->mac.get_link_status = 1;
4790 /* guard against interrupt when we're going down */
4791 if (!test_bit(__IGB_DOWN, &adapter->state))
4792 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4793 }
4794
Alexander Duyck25568a52009-10-27 23:49:59 +00004795 if (adapter->vfs_allocated_count)
4796 wr32(E1000_IMS, E1000_IMS_LSC |
4797 E1000_IMS_VMMB |
4798 E1000_IMS_DOUTSYNC);
4799 else
4800 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004801 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004802
4803 return IRQ_HANDLED;
4804}
4805
Alexander Duyck047e0032009-10-27 15:49:27 +00004806static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004807{
Alexander Duyck26b39272010-02-17 01:00:41 +00004808 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004809 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004810
Alexander Duyck047e0032009-10-27 15:49:27 +00004811 if (!q_vector->set_itr)
4812 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004813
Alexander Duyck047e0032009-10-27 15:49:27 +00004814 if (!itr_val)
4815 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004816
Alexander Duyck26b39272010-02-17 01:00:41 +00004817 if (adapter->hw.mac.type == e1000_82575)
4818 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004819 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004820 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004821
4822 writel(itr_val, q_vector->itr_register);
4823 q_vector->set_itr = 0;
4824}
4825
4826static irqreturn_t igb_msix_ring(int irq, void *data)
4827{
4828 struct igb_q_vector *q_vector = data;
4829
4830 /* Write the ITR value calculated from the previous interrupt. */
4831 igb_write_itr(q_vector);
4832
4833 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004834
Auke Kok9d5c8242008-01-24 02:22:38 -08004835 return IRQ_HANDLED;
4836}
4837
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004838#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004839static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004840{
Alexander Duyck047e0032009-10-27 15:49:27 +00004841 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004842 struct e1000_hw *hw = &adapter->hw;
4843 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004844
Alexander Duyck047e0032009-10-27 15:49:27 +00004845 if (q_vector->cpu == cpu)
4846 goto out_no_update;
4847
Alexander Duyck0ba82992011-08-26 07:45:47 +00004848 if (q_vector->tx.ring) {
4849 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004850 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4851 if (hw->mac.type == e1000_82575) {
4852 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4853 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4854 } else {
4855 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4856 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4857 E1000_DCA_TXCTRL_CPUID_SHIFT;
4858 }
4859 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4860 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4861 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004862 if (q_vector->rx.ring) {
4863 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004864 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4865 if (hw->mac.type == e1000_82575) {
4866 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4867 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4868 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004869 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004870 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004871 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004872 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004873 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4874 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4875 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4876 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004877 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004878 q_vector->cpu = cpu;
4879out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004880 put_cpu();
4881}
4882
4883static void igb_setup_dca(struct igb_adapter *adapter)
4884{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004885 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004886 int i;
4887
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004888 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004889 return;
4890
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004891 /* Always use CB2 mode, difference is masked in the CB driver. */
4892 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4893
Alexander Duyck047e0032009-10-27 15:49:27 +00004894 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004895 adapter->q_vector[i]->cpu = -1;
4896 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004897 }
4898}
4899
4900static int __igb_notify_dca(struct device *dev, void *data)
4901{
4902 struct net_device *netdev = dev_get_drvdata(dev);
4903 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004904 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004905 struct e1000_hw *hw = &adapter->hw;
4906 unsigned long event = *(unsigned long *)data;
4907
4908 switch (event) {
4909 case DCA_PROVIDER_ADD:
4910 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004911 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004912 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004913 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004914 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004915 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004916 igb_setup_dca(adapter);
4917 break;
4918 }
4919 /* Fall Through since DCA is disabled. */
4920 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004921 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004922 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004923			 * hanging around in sysfs */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004924 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004925 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004926 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004927 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004928 }
4929 break;
4930 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004931
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004932 return 0;
4933}
4934
4935static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4936 void *p)
4937{
4938 int ret_val;
4939
4940 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4941 __igb_notify_dca);
4942
4943 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4944}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004945#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004946
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004947static void igb_ping_all_vfs(struct igb_adapter *adapter)
4948{
4949 struct e1000_hw *hw = &adapter->hw;
4950 u32 ping;
4951 int i;
4952
4953 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4954 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004955 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004956 ping |= E1000_VT_MSGTYPE_CTS;
4957 igb_write_mbx(hw, &ping, 1, i);
4958 }
4959}
4960
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004961static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4962{
4963 struct e1000_hw *hw = &adapter->hw;
4964 u32 vmolr = rd32(E1000_VMOLR(vf));
4965 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4966
Alexander Duyckd85b90042010-09-22 17:56:20 +00004967 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004968 IGB_VF_FLAG_MULTI_PROMISC);
4969 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4970
4971 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4972 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00004973 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004974 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4975 } else {
4976 /*
4977 * if we have hashes and we are clearing a multicast promisc
4978 * flag we need to write the hashes to the MTA as this step
4979 * was previously skipped
4980 */
4981 if (vf_data->num_vf_mc_hashes > 30) {
4982 vmolr |= E1000_VMOLR_MPME;
4983 } else if (vf_data->num_vf_mc_hashes) {
4984 int j;
4985 vmolr |= E1000_VMOLR_ROMPE;
4986 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4987 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4988 }
4989 }
4990
4991 wr32(E1000_VMOLR(vf), vmolr);
4992
4993 /* there are flags left unprocessed, likely not supported */
4994 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4995 return -EINVAL;
4996
4997 return 0;
4998
4999}
5000
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005001static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5002 u32 *msgbuf, u32 vf)
5003{
5004 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5005 u16 *hash_list = (u16 *)&msgbuf[1];
5006 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5007 int i;
5008
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005009 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005010	 * to this VF for later use; we restore them when the PF multicast
5011 * list changes
5012 */
5013 vf_data->num_vf_mc_hashes = n;
5014
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005015 /* only up to 30 hash values supported */
5016 if (n > 30)
5017 n = 30;
5018
5019 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005020 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005021 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005022
5023 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005024 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005025
5026 return 0;
5027}
5028
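/*
 * Shape of the mailbox message parsed above, sketched with constants from
 * e1000_mbx.h (the VF side builds word 0 roughly like this):
 *
 *   msgbuf[0] = E1000_VF_SET_MULTICAST | (n << E1000_VT_MSGINFO_SHIFT);
 *
 * and up to 30 16-bit hash values follow in msgbuf[1..]; anything past 30
 * is capped by the PF, as above.
 */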
5029static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5030{
5031 struct e1000_hw *hw = &adapter->hw;
5032 struct vf_data_storage *vf_data;
5033 int i, j;
5034
5035 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005036 u32 vmolr = rd32(E1000_VMOLR(i));
5037 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5038
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005039 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005040
5041 if ((vf_data->num_vf_mc_hashes > 30) ||
5042 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5043 vmolr |= E1000_VMOLR_MPME;
5044 } else if (vf_data->num_vf_mc_hashes) {
5045 vmolr |= E1000_VMOLR_ROMPE;
5046 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5047 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5048 }
5049 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005050 }
5051}
5052
5053static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5054{
5055 struct e1000_hw *hw = &adapter->hw;
5056 u32 pool_mask, reg, vid;
5057 int i;
5058
5059 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5060
5061 /* Find the vlan filter for this id */
5062 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5063 reg = rd32(E1000_VLVF(i));
5064
5065 /* remove the vf from the pool */
5066 reg &= ~pool_mask;
5067
5068 /* if pool is empty then remove entry from vfta */
5069 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5070 (reg & E1000_VLVF_VLANID_ENABLE)) {
5071			vid = reg & E1000_VLVF_VLANID_MASK;
5072			reg = 0;
5073 igb_vfta_set(hw, vid, false);
5074 }
5075
5076 wr32(E1000_VLVF(i), reg);
5077 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005078
5079 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005080}
5081
5082static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5083{
5084 struct e1000_hw *hw = &adapter->hw;
5085 u32 reg, i;
5086
Alexander Duyck51466232009-10-27 23:47:35 +00005087 /* The vlvf table only exists on 82576 hardware and newer */
5088 if (hw->mac.type < e1000_82576)
5089 return -1;
5090
5091 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005092 if (!adapter->vfs_allocated_count)
5093 return -1;
5094
5095 /* Find the vlan filter for this id */
5096 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5097 reg = rd32(E1000_VLVF(i));
5098 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5099 vid == (reg & E1000_VLVF_VLANID_MASK))
5100 break;
5101 }
5102
5103 if (add) {
5104 if (i == E1000_VLVF_ARRAY_SIZE) {
5105 /* Did not find a matching VLAN ID entry that was
5106 * enabled. Search for a free filter entry, i.e.
5107 * one without the enable bit set
5108 */
5109 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5110 reg = rd32(E1000_VLVF(i));
5111 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5112 break;
5113 }
5114 }
5115 if (i < E1000_VLVF_ARRAY_SIZE) {
5116 /* Found an enabled/available entry */
5117 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5118
5119 /* if !enabled we need to set this up in vfta */
5120 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005121 /* add VID to filter table */
5122 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005123 reg |= E1000_VLVF_VLANID_ENABLE;
5124 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005125 reg &= ~E1000_VLVF_VLANID_MASK;
5126 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005127 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005128
5129 /* do not modify RLPML for PF devices */
5130 if (vf >= adapter->vfs_allocated_count)
5131 return 0;
5132
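			/* first VLAN on this VF: grow its Rx max frame size (RLPML) by 4 bytes to make room for the VLAN tag */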
5133 if (!adapter->vf_data[vf].vlans_enabled) {
5134 u32 size;
5135 reg = rd32(E1000_VMOLR(vf));
5136 size = reg & E1000_VMOLR_RLPML_MASK;
5137 size += 4;
5138 reg &= ~E1000_VMOLR_RLPML_MASK;
5139 reg |= size;
5140 wr32(E1000_VMOLR(vf), reg);
5141 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005142
Alexander Duyck51466232009-10-27 23:47:35 +00005143 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005144 return 0;
5145 }
5146 } else {
5147 if (i < E1000_VLVF_ARRAY_SIZE) {
5148 /* remove vf from the pool */
5149 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5150 /* if pool is empty then remove entry from vfta */
5151 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5152 reg = 0;
5153 igb_vfta_set(hw, vid, false);
5154 }
5155 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005156
5157 /* do not modify RLPML for PF devices */
5158 if (vf >= adapter->vfs_allocated_count)
5159 return 0;
5160
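			/* drop the VLAN count; when the last VLAN is removed, shrink RLPML back by 4 bytes */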
5161 adapter->vf_data[vf].vlans_enabled--;
5162 if (!adapter->vf_data[vf].vlans_enabled) {
5163 u32 size;
5164 reg = rd32(E1000_VMOLR(vf));
5165 size = reg & E1000_VMOLR_RLPML_MASK;
5166 size -= 4;
5167 reg &= ~E1000_VMOLR_RLPML_MASK;
5168 reg |= size;
5169 wr32(E1000_VMOLR(vf), reg);
5170 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005171 }
5172 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005173 return 0;
5174}
5175
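/* program VMVIR: a non-zero vid is inserted by hardware as the default VLAN tag on this VF's transmits */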
5176static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5177{
5178 struct e1000_hw *hw = &adapter->hw;
5179
5180 if (vid)
5181 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5182 else
5183 wr32(E1000_VMVIR(vf), 0);
5184}
5185
5186static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5187 int vf, u16 vlan, u8 qos)
5188{
5189 int err = 0;
5190 struct igb_adapter *adapter = netdev_priv(netdev);
5191
5192 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5193 return -EINVAL;
5194 if (vlan || qos) {
5195 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5196 if (err)
5197 goto out;
5198 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5199 igb_set_vmolr(adapter, vf, !vlan);
5200 adapter->vf_data[vf].pf_vlan = vlan;
5201 adapter->vf_data[vf].pf_qos = qos;
5202 dev_info(&adapter->pdev->dev,
5203 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5204 if (test_bit(__IGB_DOWN, &adapter->state)) {
5205 dev_warn(&adapter->pdev->dev,
5206 "The VF VLAN has been set,"
5207 " but the PF device is not up.\n");
5208 dev_warn(&adapter->pdev->dev,
5209 "Bring the PF device up before"
5210 " attempting to use the VF device.\n");
5211 }
5212 } else {
5213 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5214 false, vf);
5215 igb_set_vmvir(adapter, vlan, vf);
5216 igb_set_vmolr(adapter, vf, true);
5217 adapter->vf_data[vf].pf_vlan = 0;
5218 adapter->vf_data[vf].pf_qos = 0;
5219 }
5220out:
5221 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005222}
5223
5224static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5225{
5226 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5227 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5228
5229 return igb_vlvf_set(adapter, vid, add, vf);
5230}
5231
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005232static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005233{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005234 /* clear flags - except flag that indicates PF has set the MAC */
5235 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005236 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005237
5238 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005239 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005240
5241 /* reset vlans for device */
5242 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005243 if (adapter->vf_data[vf].pf_vlan)
5244 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5245 adapter->vf_data[vf].pf_vlan,
5246 adapter->vf_data[vf].pf_qos);
5247 else
5248 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005249
5250 /* reset multicast table array for vf */
5251 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5252
5253 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005254 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005255}
5256
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005257static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5258{
5259 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5260
5261 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005262 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5263 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005264
5265 /* process remaining reset events */
5266 igb_vf_reset(adapter, vf);
5267}
5268
5269static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005270{
5271 struct e1000_hw *hw = &adapter->hw;
5272 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
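	/* VF MAC filters live at the top of the RAR table, counting back from the last entry */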
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005273 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005274 u32 reg, msgbuf[3];
5275 u8 *addr = (u8 *)(&msgbuf[1]);
5276
5277 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005278 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005279
5280 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005281 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005282
5283 /* enable transmit and receive for vf */
5284 reg = rd32(E1000_VFTE);
5285 wr32(E1000_VFTE, reg | (1 << vf));
5286 reg = rd32(E1000_VFRE);
5287 wr32(E1000_VFRE, reg | (1 << vf));
5288
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005289 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005290
5291 /* reply to reset with ack and vf mac address */
5292 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5293 memcpy(addr, vf_mac, 6);
5294 igb_write_mbx(hw, msgbuf, 3, vf);
5295}
5296
5297static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5298{
Greg Rosede42edd2010-07-01 13:39:23 +00005299 /*
5300 * The VF MAC Address is stored in a packed array of bytes
5301 * starting at the second 32 bit word of the msg array
5302 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005303	unsigned char *addr = (unsigned char *)&msg[1];
5304 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005305
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005306 if (is_valid_ether_addr(addr))
5307 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005308
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005309 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005310}
5311
5312static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5313{
5314 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005315 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005316 u32 msg = E1000_VT_MSGTYPE_NACK;
5317
5318 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005319 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5320 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005321 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005322 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005323 }
5324}
5325
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005326static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005327{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005328 struct pci_dev *pdev = adapter->pdev;
5329 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005330 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005331 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005332 s32 retval;
5333
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005334 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005335
Alexander Duyckfef45f42009-12-11 22:57:34 -08005336 if (retval) {
5337 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005338 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005339 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5340 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5341 return;
5342 goto out;
5343 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005344
5345 /* this is a message we already processed, do nothing */
5346 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005347 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005348
5349 /*
5350 * until the vf completes a reset it should not be
5351 * allowed to start any configuration.
5352 */
5353
5354 if (msgbuf[0] == E1000_VF_RESET) {
5355 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005356 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005357 }
5358
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005359 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005360 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5361 return;
5362 retval = -1;
5363 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005364 }
5365
5366 switch ((msgbuf[0] & 0xFFFF)) {
5367 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005368 retval = -EINVAL;
5369 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5370 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5371 else
5372 dev_warn(&pdev->dev,
5373 "VF %d attempted to override administratively "
5374 "set MAC address\nReload the VF driver to "
5375 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005376 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005377 case E1000_VF_SET_PROMISC:
5378 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5379 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005380 case E1000_VF_SET_MULTICAST:
5381 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5382 break;
5383 case E1000_VF_SET_LPE:
5384 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5385 break;
5386 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005387 retval = -1;
5388 if (vf_data->pf_vlan)
5389 dev_warn(&pdev->dev,
5390 "VF %d attempted to override administratively "
5391 "set VLAN tag\nReload the VF driver to "
5392 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005393 else
5394 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005395 break;
5396 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005397 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005398 retval = -1;
5399 break;
5400 }
5401
Alexander Duyckfef45f42009-12-11 22:57:34 -08005402 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5403out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005404 /* notify the VF of the results of what it sent us */
5405 if (retval)
5406 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5407 else
5408 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5409
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005410 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005411}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005412
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005413static void igb_msg_task(struct igb_adapter *adapter)
5414{
5415 struct e1000_hw *hw = &adapter->hw;
5416 u32 vf;
5417
5418 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5419 /* process any reset requests */
5420 if (!igb_check_for_rst(hw, vf))
5421 igb_vf_reset_event(adapter, vf);
5422
5423 /* process any messages pending */
5424 if (!igb_check_for_msg(hw, vf))
5425 igb_rcv_msg_from_vf(adapter, vf);
5426
5427 /* process any acks */
5428 if (!igb_check_for_ack(hw, vf))
5429 igb_rcv_ack_from_vf(adapter, vf);
5430 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005431}
5432
Auke Kok9d5c8242008-01-24 02:22:38 -08005433/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005434 * igb_set_uta - Set unicast filter table address
5435 * @adapter: board private structure
5436 *
5437 * The unicast table address is a register array of 32-bit registers.
   5438 * The table is meant to be used in a way similar to how the MTA is used;
   5439 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005440 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5441 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005442 **/
5443static void igb_set_uta(struct igb_adapter *adapter)
5444{
5445 struct e1000_hw *hw = &adapter->hw;
5446 int i;
5447
5448 /* The UTA table only exists on 82576 hardware and newer */
5449 if (hw->mac.type < e1000_82576)
5450 return;
5451
5452 /* we only need to do this if VMDq is enabled */
5453 if (!adapter->vfs_allocated_count)
5454 return;
5455
5456 for (i = 0; i < hw->mac.uta_reg_count; i++)
5457 array_wr32(E1000_UTA, i, ~0);
5458}
5459
5460/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005461 * igb_intr_msi - Interrupt Handler
5462 * @irq: interrupt number
   5463 * @data: pointer to our private igb_adapter structure
5464 **/
5465static irqreturn_t igb_intr_msi(int irq, void *data)
5466{
Alexander Duyck047e0032009-10-27 15:49:27 +00005467 struct igb_adapter *adapter = data;
5468 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005469 struct e1000_hw *hw = &adapter->hw;
5470 /* read ICR disables interrupts using IAM */
5471 u32 icr = rd32(E1000_ICR);
5472
Alexander Duyck047e0032009-10-27 15:49:27 +00005473 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005474
Alexander Duyck7f081d42010-01-07 17:41:00 +00005475 if (icr & E1000_ICR_DRSTA)
5476 schedule_work(&adapter->reset_task);
5477
Alexander Duyck047e0032009-10-27 15:49:27 +00005478 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005479 /* HW is reporting DMA is out of sync */
5480 adapter->stats.doosync++;
5481 }
5482
Auke Kok9d5c8242008-01-24 02:22:38 -08005483 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5484 hw->mac.get_link_status = 1;
5485 if (!test_bit(__IGB_DOWN, &adapter->state))
5486 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5487 }
5488
Alexander Duyck047e0032009-10-27 15:49:27 +00005489 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005490
5491 return IRQ_HANDLED;
5492}
5493
5494/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005495 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005496 * @irq: interrupt number
   5497 * @data: pointer to our private igb_adapter structure
5498 **/
5499static irqreturn_t igb_intr(int irq, void *data)
5500{
Alexander Duyck047e0032009-10-27 15:49:27 +00005501 struct igb_adapter *adapter = data;
5502 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005503 struct e1000_hw *hw = &adapter->hw;
5504 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5505 * need for the IMC write */
5506 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005507
5508 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5509 * not set, then the adapter didn't send an interrupt */
5510 if (!(icr & E1000_ICR_INT_ASSERTED))
5511 return IRQ_NONE;
5512
Alexander Duyck0ba82992011-08-26 07:45:47 +00005513 igb_write_itr(q_vector);
5514
Alexander Duyck7f081d42010-01-07 17:41:00 +00005515 if (icr & E1000_ICR_DRSTA)
5516 schedule_work(&adapter->reset_task);
5517
Alexander Duyck047e0032009-10-27 15:49:27 +00005518 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005519 /* HW is reporting DMA is out of sync */
5520 adapter->stats.doosync++;
5521 }
5522
Auke Kok9d5c8242008-01-24 02:22:38 -08005523 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5524 hw->mac.get_link_status = 1;
5525 /* guard against interrupt when we're going down */
5526 if (!test_bit(__IGB_DOWN, &adapter->state))
5527 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5528 }
5529
Alexander Duyck047e0032009-10-27 15:49:27 +00005530 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005531
5532 return IRQ_HANDLED;
5533}
5534
Alexander Duyck0ba82992011-08-26 07:45:47 +00005535void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005536{
Alexander Duyck047e0032009-10-27 15:49:27 +00005537 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005538 struct e1000_hw *hw = &adapter->hw;
5539
Alexander Duyck0ba82992011-08-26 07:45:47 +00005540 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5541 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5542 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5543 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005544 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005545 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005546 }
5547
5548 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5549 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005550 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005551 else
5552 igb_irq_enable(adapter);
5553 }
5554}
5555
Auke Kok9d5c8242008-01-24 02:22:38 -08005556/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005557 * igb_poll - NAPI Rx polling callback
5558 * @napi: napi polling structure
5559 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005560 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005561static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005562{
Alexander Duyck047e0032009-10-27 15:49:27 +00005563 struct igb_q_vector *q_vector = container_of(napi,
5564 struct igb_q_vector,
5565 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005566 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005567
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005568#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005569 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5570 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005571#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005572 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005573 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005574
Alexander Duyck0ba82992011-08-26 07:45:47 +00005575 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005576 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005577
Alexander Duyck16eb8812011-08-26 07:43:54 +00005578 /* If all work not completed, return budget and keep polling */
5579 if (!clean_complete)
5580 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005581
Alexander Duyck46544252009-02-19 20:39:04 -08005582	/* all work completed; exit polling mode and re-arm the interrupt */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005583 napi_complete(napi);
5584 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005585
Alexander Duyck16eb8812011-08-26 07:43:54 +00005586 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005587}
Al Viro6d8126f2008-03-16 22:23:24 +00005588
Auke Kok9d5c8242008-01-24 02:22:38 -08005589/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005590 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005591 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005592 * @shhwtstamps: timestamp structure to update
5593 * @regval: unsigned 64bit system time value.
5594 *
5595 * We need to convert the system time value stored in the RX/TXSTMP registers
5596 * into a hwtstamp which can be used by the upper level timestamping functions
5597 */
5598static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5599 struct skb_shared_hwtstamps *shhwtstamps,
5600 u64 regval)
5601{
5602 u64 ns;
5603
Alexander Duyck55cac242009-11-19 12:42:21 +00005604 /*
5605 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
   5606	 * 24 to match the clock shift we set up earlier.
5607 */
5608 if (adapter->hw.mac.type == e1000_82580)
5609 regval <<= IGB_82580_TSYNC_SHIFT;
5610
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005611 ns = timecounter_cyc2time(&adapter->clock, regval);
5612 timecompare_update(&adapter->compare, ns);
5613 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5614 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5615 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5616}
5617
5618/**
5619 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5620 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck06034642011-08-26 07:44:22 +00005621 * @buffer_info: pointer to igb_tx_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005622 *
5623 * If we were asked to do hardware stamping and such a time stamp is
5624 * available, then it must have been for this skb here because we only
5625 * allow only one such packet into the queue.
5626 */
Alexander Duyck06034642011-08-26 07:44:22 +00005627static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5628 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005629{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005630 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005631 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005632 struct skb_shared_hwtstamps shhwtstamps;
5633 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005634
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005635 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005636 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005637 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5638 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005639
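	/* read TXSTMPL before TXSTMPH; reading the high register frees the pair for the next timestamp */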
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005640 regval = rd32(E1000_TXSTMPL);
5641 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5642
5643 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005644 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005645}
5646
5647/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005648 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005649 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005650 * returns true if ring is completely cleaned
5651 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005652static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005653{
Alexander Duyck047e0032009-10-27 15:49:27 +00005654 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005655 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005656 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005657 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005658 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005659 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005660 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005661
Alexander Duyck13fde972011-10-05 13:35:24 +00005662 if (test_bit(__IGB_DOWN, &adapter->state))
5663 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005664
Alexander Duyck06034642011-08-26 07:44:22 +00005665 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005666 tx_desc = IGB_TX_DESC(tx_ring, i);
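	/* bias i below zero so the end-of-ring wrap reduces to a test against zero */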
Alexander Duyck8542db02011-08-26 07:44:43 +00005667 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005668
Alexander Duyck13fde972011-10-05 13:35:24 +00005669 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005670 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005671
Alexander Duyck8542db02011-08-26 07:44:43 +00005672 /* prevent any other reads prior to eop_desc */
5673 rmb();
5674
5675 /* if next_to_watch is not set then there is no work pending */
5676 if (!eop_desc)
5677 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005678
5679 /* if DD is not set pending work has not been completed */
5680 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5681 break;
5682
Alexander Duyck8542db02011-08-26 07:44:43 +00005683 /* clear next_to_watch to prevent false hangs */
5684 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005685
Alexander Duyckebe42d12011-08-26 07:45:09 +00005686 /* update the statistics for this packet */
5687 total_bytes += tx_buffer->bytecount;
5688 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005689
Alexander Duyckebe42d12011-08-26 07:45:09 +00005690 /* retrieve hardware timestamp */
5691 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005692
Alexander Duyckebe42d12011-08-26 07:45:09 +00005693 /* free the skb */
5694 dev_kfree_skb_any(tx_buffer->skb);
5695 tx_buffer->skb = NULL;
5696
5697 /* unmap skb header data */
5698 dma_unmap_single(tx_ring->dev,
5699 tx_buffer->dma,
5700 tx_buffer->length,
5701 DMA_TO_DEVICE);
5702
5703 /* clear last DMA location and unmap remaining buffers */
5704 while (tx_desc != eop_desc) {
5705 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005706
Alexander Duyck13fde972011-10-05 13:35:24 +00005707 tx_buffer++;
5708 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005709 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005710 if (unlikely(!i)) {
5711 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005712 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005713 tx_desc = IGB_TX_DESC(tx_ring, 0);
5714 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005715
5716 /* unmap any remaining paged data */
5717 if (tx_buffer->dma) {
5718 dma_unmap_page(tx_ring->dev,
5719 tx_buffer->dma,
5720 tx_buffer->length,
5721 DMA_TO_DEVICE);
5722 }
5723 }
5724
5725 /* clear last DMA location */
5726 tx_buffer->dma = 0;
5727
5728 /* move us one more past the eop_desc for start of next pkt */
5729 tx_buffer++;
5730 tx_desc++;
5731 i++;
5732 if (unlikely(!i)) {
5733 i -= tx_ring->count;
5734 tx_buffer = tx_ring->tx_buffer_info;
5735 tx_desc = IGB_TX_DESC(tx_ring, 0);
5736 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005737 }
5738
Alexander Duyck8542db02011-08-26 07:44:43 +00005739 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005740 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005741 u64_stats_update_begin(&tx_ring->tx_syncp);
5742 tx_ring->tx_stats.bytes += total_bytes;
5743 tx_ring->tx_stats.packets += total_packets;
5744 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005745 q_vector->tx.total_bytes += total_bytes;
5746 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005747
5748 if (tx_ring->detect_tx_hung) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005749 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005750
Alexander Duyck8542db02011-08-26 07:44:43 +00005751 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005752
Auke Kok9d5c8242008-01-24 02:22:38 -08005753		/* Detect a transmit hang in hardware; this serializes the
5754 * check with the clearing of time_stamp and movement of i */
5755 tx_ring->detect_tx_hung = false;
Alexander Duyck8542db02011-08-26 07:44:43 +00005756 if (eop_desc &&
5757 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005758 (adapter->tx_timeout_factor * HZ)) &&
5759 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005760
Auke Kok9d5c8242008-01-24 02:22:38 -08005761 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005762 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005763 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005764 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005765 " TDH <%x>\n"
5766 " TDT <%x>\n"
5767 " next_to_use <%x>\n"
5768 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005769 "buffer_info[next_to_clean]\n"
5770 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005771 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005772 " jiffies <%lx>\n"
5773 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005774 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005775 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005776 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005777 tx_ring->next_to_use,
5778 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005779 tx_buffer->time_stamp,
5780 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005781 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005782 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005783 netif_stop_subqueue(tx_ring->netdev,
5784 tx_ring->queue_index);
5785
5786 /* we are about to reset, no point in enabling stuff */
5787 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005788 }
5789 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005790
5791 if (unlikely(total_packets &&
5792 netif_carrier_ok(tx_ring->netdev) &&
5793 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5794 /* Make sure that anybody stopping the queue after this
5795 * sees the new next_to_clean.
5796 */
5797 smp_mb();
5798 if (__netif_subqueue_stopped(tx_ring->netdev,
5799 tx_ring->queue_index) &&
5800 !(test_bit(__IGB_DOWN, &adapter->state))) {
5801 netif_wake_subqueue(tx_ring->netdev,
5802 tx_ring->queue_index);
5803
5804 u64_stats_update_begin(&tx_ring->tx_syncp);
5805 tx_ring->tx_stats.restart_queue++;
5806 u64_stats_update_end(&tx_ring->tx_syncp);
5807 }
5808 }
5809
5810 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005811}
5812
Alexander Duyckcd392f52011-08-26 07:43:59 +00005813static inline void igb_rx_checksum(struct igb_ring *ring,
5814 u32 status_err, struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005815{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005816 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005817
   5818	/* bail if the Ignore Checksum bit is set or Rx checksum is disabled through ethtool */
Alexander Duyck866cff02011-08-26 07:45:36 +00005819 if (!test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags) ||
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005820 (status_err & E1000_RXD_STAT_IXSM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005821 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005822
Auke Kok9d5c8242008-01-24 02:22:38 -08005823 /* TCP/UDP checksum error bit is set */
5824 if (status_err &
5825 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005826 /*
5827 * work around errata with sctp packets where the TCPE aka
5828 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
   5829		 * packets (i.e. let the stack check the crc32c)
5830 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005831 if (!((skb->len == 60) &&
5832 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005833 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005834 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005835 u64_stats_update_end(&ring->rx_syncp);
5836 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005837 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005838 return;
5839 }
5840 /* It must be a TCP or UDP packet with a valid checksum */
5841 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5842 skb->ip_summed = CHECKSUM_UNNECESSARY;
5843
Alexander Duyck59d71982010-04-27 13:09:25 +00005844 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005845}
5846
Nick Nunley757b77e2010-03-26 11:36:47 +00005847static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005848 struct sk_buff *skb)
5849{
5850 struct igb_adapter *adapter = q_vector->adapter;
5851 struct e1000_hw *hw = &adapter->hw;
5852 u64 regval;
5853
5854 /*
5855 * If this bit is set, then the RX registers contain the time stamp. No
5856 * other packet will be time stamped until we read these registers, so
5857 * read the registers to make them available again. Because only one
5858 * packet can be time stamped at a time, we know that the register
5859 * values must belong to this one here and therefore we don't need to
5860 * compare any of the additional attributes stored for it.
5861 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005862 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005863 * can turn into a skb_shared_hwtstamps.
5864 */
Nick Nunley757b77e2010-03-26 11:36:47 +00005865 if (staterr & E1000_RXDADV_STAT_TSIP) {
5866 u32 *stamp = (u32 *)skb->data;
5867 regval = le32_to_cpu(*(stamp + 2));
5868 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5869 skb_pull(skb, IGB_TS_HDR_LEN);
5870 } else {
   5871		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5872 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005873
Nick Nunley757b77e2010-03-26 11:36:47 +00005874 regval = rd32(E1000_RXSTMPL);
5875 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5876 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005877
5878 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5879}
Alexander Duyck44390ca2011-08-26 07:43:38 +00005880static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005881{
5882 /* HW will not DMA in data larger than the given buffer, even if it
5883 * parses the (NFS, of course) header to be larger. In that case, it
5884 * fills the header buffer and spills the rest into the page.
5885 */
5886 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5887 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00005888 if (hlen > IGB_RX_HDR_LEN)
5889 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005890 return hlen;
5891}
5892
Alexander Duyckcd392f52011-08-26 07:43:59 +00005893static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005894{
Alexander Duyck0ba82992011-08-26 07:45:47 +00005895 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005896 union e1000_adv_rx_desc *rx_desc;
5897 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005898 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005899 u32 staterr;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005900 u16 cleaned_count = igb_desc_unused(rx_ring);
5901 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005902
Alexander Duyck601369062011-08-26 07:44:05 +00005903 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005904 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5905
5906 while (staterr & E1000_RXD_STAT_DD) {
Alexander Duyck06034642011-08-26 07:44:22 +00005907 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00005908 struct sk_buff *skb = buffer_info->skb;
5909 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005910
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005911 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005912 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005913
5914 i++;
5915 if (i == rx_ring->count)
5916 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005917
Alexander Duyck601369062011-08-26 07:44:05 +00005918 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005919 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005920
Alexander Duyck16eb8812011-08-26 07:43:54 +00005921 /*
5922 * This memory barrier is needed to keep us from reading
5923 * any other fields out of the rx_desc until we know the
5924 * RXD_STAT_DD bit is set
5925 */
5926 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005927
Alexander Duyck16eb8812011-08-26 07:43:54 +00005928 if (!skb_is_nonlinear(skb)) {
5929 __skb_put(skb, igb_get_hlen(rx_desc));
5930 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00005931 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00005932 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00005933 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005934 }
5935
Alexander Duyck16eb8812011-08-26 07:43:54 +00005936 if (rx_desc->wb.upper.length) {
5937 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005938
Koki Sanagiaa913402010-04-27 01:01:19 +00005939 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005940 buffer_info->page,
5941 buffer_info->page_offset,
5942 length);
5943
Alexander Duyck16eb8812011-08-26 07:43:54 +00005944 skb->len += length;
5945 skb->data_len += length;
5946 skb->truesize += length;
5947
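			/* reuse the other half of the page only if we are its sole owner and it is NUMA-local */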
Alexander Duyckd1eff352009-11-12 18:38:35 +00005948 if ((page_count(buffer_info->page) != 1) ||
5949 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005950 buffer_info->page = NULL;
5951 else
5952 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08005953
Alexander Duyck16eb8812011-08-26 07:43:54 +00005954 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
5955 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5956 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005957 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005958
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005959 if (!(staterr & E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00005960 struct igb_rx_buffer *next_buffer;
5961 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08005962 buffer_info->skb = next_buffer->skb;
5963 buffer_info->dma = next_buffer->dma;
5964 next_buffer->skb = skb;
5965 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005966 goto next_desc;
5967 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00005968
Auke Kok9d5c8242008-01-24 02:22:38 -08005969 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00005970 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005971 goto next_desc;
5972 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005973
Nick Nunley757b77e2010-03-26 11:36:47 +00005974 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5975 igb_rx_hwtstamp(q_vector, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005976 total_bytes += skb->len;
5977 total_packets++;
5978
Alexander Duyckcd392f52011-08-26 07:43:59 +00005979 igb_rx_checksum(rx_ring, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005980
Alexander Duyck16eb8812011-08-26 07:43:54 +00005981 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08005982
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00005983 if (staterr & E1000_RXD_STAT_VP) {
5984 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
Alexander Duyck047e0032009-10-27 15:49:27 +00005985
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00005986 __vlan_hwaccel_put_tag(skb, vid);
5987 }
5988 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005989
Alexander Duyck16eb8812011-08-26 07:43:54 +00005990 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08005991next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00005992 if (!budget)
5993 break;
5994
5995 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005996 /* return some buffers to hardware, one at a time is too slow */
5997 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00005998 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005999 cleaned_count = 0;
6000 }
6001
6002 /* use prefetched values */
6003 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006004 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
6005 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006006
Auke Kok9d5c8242008-01-24 02:22:38 -08006007 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006008 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006009 rx_ring->rx_stats.packets += total_packets;
6010 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006011 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006012 q_vector->rx.total_packets += total_packets;
6013 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006014
6015 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006016 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006017
Alexander Duyck16eb8812011-08-26 07:43:54 +00006018 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006019}
6020
Alexander Duyckc023cd82011-08-26 07:43:43 +00006021static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006022 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006023{
6024 struct sk_buff *skb = bi->skb;
6025 dma_addr_t dma = bi->dma;
6026
6027 if (dma)
6028 return true;
6029
6030 if (likely(!skb)) {
6031 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6032 IGB_RX_HDR_LEN);
6033 bi->skb = skb;
6034 if (!skb) {
6035 rx_ring->rx_stats.alloc_failed++;
6036 return false;
6037 }
6038
6039 /* initialize skb for ring */
6040 skb_record_rx_queue(skb, rx_ring->queue_index);
6041 }
6042
6043 dma = dma_map_single(rx_ring->dev, skb->data,
6044 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6045
6046 if (dma_mapping_error(rx_ring->dev, dma)) {
6047 rx_ring->rx_stats.alloc_failed++;
6048 return false;
6049 }
6050
6051 bi->dma = dma;
6052 return true;
6053}
6054
6055static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006056 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006057{
6058 struct page *page = bi->page;
6059 dma_addr_t page_dma = bi->page_dma;
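	/* the XOR flips between the two halves of the page on each refill */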
6060 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6061
6062 if (page_dma)
6063 return true;
6064
6065 if (!page) {
6066 page = netdev_alloc_page(rx_ring->netdev);
6067 bi->page = page;
6068 if (unlikely(!page)) {
6069 rx_ring->rx_stats.alloc_failed++;
6070 return false;
6071 }
6072 }
6073
6074 page_dma = dma_map_page(rx_ring->dev, page,
6075 page_offset, PAGE_SIZE / 2,
6076 DMA_FROM_DEVICE);
6077
6078 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6079 rx_ring->rx_stats.alloc_failed++;
6080 return false;
6081 }
6082
6083 bi->page_dma = page_dma;
6084 bi->page_offset = page_offset;
6085 return true;
6086}
6087
Auke Kok9d5c8242008-01-24 02:22:38 -08006088/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006089 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08006090 * @rx_ring: rx descriptor ring to place buffers on
 * @cleaned_count: number of buffers to replace
6091 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006092void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006093{
Auke Kok9d5c8242008-01-24 02:22:38 -08006094 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006095 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006096 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006097
Alexander Duyck601369062011-08-26 07:44:05 +00006098 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006099 bi = &rx_ring->rx_buffer_info[i];
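	/* as in the Tx clean path, bias i below zero so wrap-around is a simple zero test */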
Alexander Duyckc023cd82011-08-26 07:43:43 +00006100 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006101
6102 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006103 if (!igb_alloc_mapped_skb(rx_ring, bi))
6104 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006105
Alexander Duyckc023cd82011-08-26 07:43:43 +00006106 /* Refresh the desc even if buffer_addrs didn't change
6107 * because each write-back erases this info. */
6108 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006109
Alexander Duyckc023cd82011-08-26 07:43:43 +00006110 if (!igb_alloc_mapped_page(rx_ring, bi))
6111 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006112
Alexander Duyckc023cd82011-08-26 07:43:43 +00006113 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006114
Alexander Duyckc023cd82011-08-26 07:43:43 +00006115 rx_desc++;
6116 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006117 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006118 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006119 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006120 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006121 i -= rx_ring->count;
6122 }
6123
6124 /* clear the hdr_addr for the next_to_use descriptor */
6125 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006126 }
6127
Alexander Duyckc023cd82011-08-26 07:43:43 +00006128 i += rx_ring->count;
6129
Auke Kok9d5c8242008-01-24 02:22:38 -08006130 if (rx_ring->next_to_use != i) {
6131 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006132
6133 /* Force memory writes to complete before letting h/w
6134 * know there are new descriptors to fetch. (Only
6135 * applicable for weak-ordered memory model archs,
6136 * such as IA-64). */
6137 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006138 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006139 }
6140}
6141
6142/**
   6143 * igb_mii_ioctl - read or write a PHY register via the MII ioctl interface
   6144 * @netdev: network interface device structure
   6145 * @ifr: pointer to interface request structure
   6146 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
6147 **/
6148static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6149{
6150 struct igb_adapter *adapter = netdev_priv(netdev);
6151 struct mii_ioctl_data *data = if_mii(ifr);
6152
6153 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6154 return -EOPNOTSUPP;
6155
6156 switch (cmd) {
6157 case SIOCGMIIPHY:
6158 data->phy_id = adapter->hw.phy.addr;
6159 break;
6160 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006161 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6162 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006163 return -EIO;
6164 break;
6165 case SIOCSMIIREG:
6166 default:
6167 return -EOPNOTSUPP;
6168 }
6169 return 0;
6170}
6171
6172/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006173 * igb_hwtstamp_ioctl - control hardware time stamping
   6174 * @netdev: network interface device structure
   6175 * @ifr: pointer to interface request structure
   6176 * @cmd: ioctl command (SIOCSHWTSTAMP)
6177 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006178 * Outgoing time stamping can be enabled and disabled. Play nice and
   6179 * disable it when requested, although it shouldn't cause any overhead
6180 * when no packet needs it. At most one packet in the queue may be
6181 * marked for time stamping, otherwise it would be impossible to tell
6182 * for sure to which packet the hardware time stamp belongs.
6183 *
6184 * Incoming time stamping has to be configured via the hardware
6185 * filters. Not all combinations are supported, in particular event
6186 * type has to be specified. Matching the kind of event packet is
6187 * not supported, with the exception of "all V2 events regardless of
6188 * level 2 or 4".
6189 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006190 **/
6191static int igb_hwtstamp_ioctl(struct net_device *netdev,
6192 struct ifreq *ifr, int cmd)
6193{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006194 struct igb_adapter *adapter = netdev_priv(netdev);
6195 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006196 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006197 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6198 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006199 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006200 bool is_l4 = false;
6201 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006202 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006203
6204 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6205 return -EFAULT;
6206
6207 /* reserved for future extensions */
6208 if (config.flags)
6209 return -EINVAL;
6210
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006211 switch (config.tx_type) {
6212 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006213 tsync_tx_ctl = 0;
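		/* fall through - both TX_OFF and TX_ON are accepted */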
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006214 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006215 break;
6216 default:
6217 return -ERANGE;
6218 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006219
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006220 switch (config.rx_filter) {
6221 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006222 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006223 break;
6224 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6225 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6226 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6227 case HWTSTAMP_FILTER_ALL:
6228 /*
6229 * register TSYNCRXCFG must be set, therefore it is not
6230 * possible to time stamp both Sync and Delay_Req messages
6231 * => fall back to time stamping all packets
6232 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006233 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006234 config.rx_filter = HWTSTAMP_FILTER_ALL;
6235 break;
6236 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006237 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006238 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006239 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006240 break;
6241 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006242 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006243 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006244 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006245 break;
6246 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6247 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006248 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006249 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006250 is_l2 = true;
6251 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006252 config.rx_filter = HWTSTAMP_FILTER_SOME;
6253 break;
6254 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6255 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006256 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006257 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006258 is_l2 = true;
6259 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006260 config.rx_filter = HWTSTAMP_FILTER_SOME;
6261 break;
6262 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6263 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6264 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006265 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006266 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006267 is_l2 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006268 break;
6269 default:
6270 return -ERANGE;
6271 }
6272
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006273 if (hw->mac.type == e1000_82575) {
6274 if (tsync_rx_ctl | tsync_tx_ctl)
6275 return -EINVAL;
6276 return 0;
6277 }
6278
Nick Nunley757b77e2010-03-26 11:36:47 +00006279 /*
6280 * Per-packet timestamping only works if all packets are
6281 * timestamped, so enable timestamping in all packets as
6282 * long as one rx filter was configured.
6283 */
6284 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
6285 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6286 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6287 }
6288
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006289 /* enable/disable TX */
6290 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006291 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6292 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006293 wr32(E1000_TSYNCTXCTL, regval);
6294
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006295 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006296 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006297 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6298 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006299 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006300
6301 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006302 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6303
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006304 /* define ethertype filter for timestamped packets */
6305 if (is_l2)
6306 wr32(E1000_ETQF(3),
6307 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6308 E1000_ETQF_1588 | /* enable timestamping */
6309 ETH_P_1588)); /* 1588 eth protocol type */
6310 else
6311 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006312
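/* IEEE 1588 event messages (Sync, Delay_Req) are carried on UDP port 319 */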
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006313#define PTP_PORT 319
6314 /* L4 Queue Filter[3]: filter by destination port and protocol */
6315 if (is_l4) {
6316 u32 ftqf = (IPPROTO_UDP /* UDP */
6317 | E1000_FTQF_VF_BP /* VF not compared */
6318 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6319 | E1000_FTQF_MASK); /* mask all inputs */
6320 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006321
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006322 wr32(E1000_IMIR(3), htons(PTP_PORT));
6323 wr32(E1000_IMIREXT(3),
6324 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6325 if (hw->mac.type == e1000_82576) {
6326 /* enable source port check */
6327 wr32(E1000_SPQF(3), htons(PTP_PORT));
6328 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6329 }
6330 wr32(E1000_FTQF(3), ftqf);
6331 } else {
6332 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6333 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006334 wrfl();
6335
6336 adapter->hwtstamp_config = config;
6337
6338 /* clear TX/RX time stamp registers, just to be sure */
6339 regval = rd32(E1000_TXSTMPH);
6340 regval = rd32(E1000_RXSTMPH);
6341
6342 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6343 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006344}

/**
 * igb_ioctl - handle ioctls not covered by the generic net layer
 * @netdev: network interface device structure
 * @ifr: interface request structure containing the ioctl data
 * @cmd: ioctl command to execute
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
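
/*
 * Callers pass a register offset relative to the PCIe capability, so a
 * read of the negotiated link status would look roughly like this (a
 * sketch; PCIE_LINK_STATUS is assumed to be the driver's define for
 * that offset):
 *
 *	u16 link_status;
 *
 *	if (!igb_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, &link_status))
 *		... decode link width/speed from link_status ...
 */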
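/*
 * igb_vlan_mode - sync VLAN tag stripping with the netdev feature set
 *
 * Reached when the stack toggles NETIF_F_HW_VLAN_RX (typically via
 * "ethtool -K ethX rxvlan on|off"), so the CTRL.VME strip/insert
 * enable always tracks the currently requested features.
 */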
static void igb_vlan_mode(struct net_device *netdev, u32 features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);

	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most one bit wide and the LSB of spd is
	 * clear; otherwise the switch() below cannot decode the
	 * spd + dplx sum unambiguously */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

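	/*
	 * The sum works as a key because SPEED_10/100/1000 are the even
	 * values 10, 100 and 1000 while DUPLEX_HALF/DUPLEX_FULL are 0
	 * and 1; e.g. SPEED_100 + DUPLEX_FULL == 101 can be produced by
	 * no other speed/duplex pair.
	 */
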
	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
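/*
 * PCI error recovery: the AER core invokes the three handlers below in
 * sequence - igb_io_error_detected() to quiesce the device,
 * igb_io_slot_reset() to bring it back up after the bus reset, and
 * igb_io_resume() to restart traffic once recovery has succeeded.
 */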
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
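
/*
 * Worked example for the byte swizzling above: for the (hypothetical)
 * MAC address 00:11:22:33:44:55, addr[0..5] = {0x00, 0x11, 0x22, 0x33,
 * 0x44, 0x55}, giving rar_low = 0x33221100 and rar_high = 0x5544
 * before the valid and pool-select bits are OR'd in.
 */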

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address
	 * registers and move towards the first, so a collision should
	 * not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}
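
/*
 * The ndo_set_vf_* hooks above and below are normally exercised from
 * userspace through the rtnetlink VF API, e.g. (assuming an iproute2
 * version new enough to support the VF options):
 *
 *	ip link set dev ethX vf 0 mac 02:00:00:00:00:01
 *	ip link set dev ethX vf 0 rate 500
 */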

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			       E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}
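
/*
 * The rate factor programmed above is the fixed-point ratio
 * link_speed / tx_rate, split into an integer part (rf_int) and a
 * fractional part (rf_dec) scaled by 2^E1000_RTTBCNRC_RF_INT_SHIFT.
 * For example, with link_speed = 1000 and tx_rate = 300:
 * rf_int = 3 and rf_dec = ((1000 - 900) << shift) / 300, i.e. the
 * factor is roughly 3.333, throttling the VF's queue to about a
 * third of the link rate.
 */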

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}
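
/*
 * Note the validation above: the requested rate must lie between 0 and
 * the current link speed in Mbps, and a rate of 0 falls through to
 * igb_set_vf_rate_limit() with bcnrc_val == 0, which disables the
 * limiter for that VF.
 */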

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
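		/* fall through - 82576 also needs the RPLOLR setting below */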
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
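		/* fall through */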
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
						adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */