/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 0
#define BUILD 6
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = igb_suspend,
	.resume = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15     0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47     32 31            20 19              0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

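/*
 * Q_IDX_82576 maps the i-th unclaimed queue onto the 82576's paired layout:
 * successive values of i yield hardware queue indices 0, 8, 1, 9, 2, 10, ...
 * mirroring the VF pairing described in igb_cache_ring_register() below.
 */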
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

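/**
 * igb_free_queues - free memory for all rings
 * @adapter: board private structure
 *
 * Undoes igb_alloc_queues: releases each tx/rx ring structure and
 * resets the queue counts to zero.
 **/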
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
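/**
 * igb_assign_vector - map the rings owned by a q_vector to an MSI-X vector
 * @q_vector: interrupt vector whose rings are being mapped
 * @msix_vector: hardware MSI-X vector number to assign
 *
 * Writes the mapping into the MSIXBM or IVAR registers (depending on MAC
 * type) and accumulates the vector's EIMS bit into eims_enable_mask.
 **/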
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

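/**
 * igb_reset_interrupt_capability - disable MSI-X/MSI
 * @adapter: board private structure
 *
 * Releases the MSI-X vector table or disables MSI so the device falls
 * back to legacy interrupts.
 **/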
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

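/**
 * igb_map_rx_ring_to_vector - attach an Rx ring to a q_vector
 * @adapter: board private structure
 * @ring_idx: index of the Rx ring to map
 * @v_idx: index of the q_vector that will service the ring
 *
 * Also seeds the vector's ITR from the adapter's rx_itr_setting.
 **/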
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

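/**
 * igb_map_tx_ring_to_vector - attach a Tx ring to a q_vector
 * @adapter: board private structure
 * @ring_idx: index of the Tx ring to map
 * @v_idx: index of the q_vector that will service the ring
 *
 * Also seeds the vector's ITR and work limit from the adapter settings.
 **/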
1158static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
1159 int ring_idx, int v_idx)
1160{
Alexander Duyck3025a442010-02-17 01:02:39 +00001161 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
Alexander Duyck047e0032009-10-27 15:49:27 +00001162
Alexander Duyck0ba82992011-08-26 07:45:47 +00001163 q_vector->tx.ring = adapter->tx_ring[ring_idx];
1164 q_vector->tx.ring->q_vector = q_vector;
1165 q_vector->tx.count++;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001166 q_vector->itr_val = adapter->tx_itr_setting;
Alexander Duyck0ba82992011-08-26 07:45:47 +00001167 q_vector->tx.work_limit = adapter->tx_work_limit;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001168 if (q_vector->itr_val && q_vector->itr_val <= 3)
1169 q_vector->itr_val = IGB_START_ITR;
Alexander Duyck047e0032009-10-27 15:49:27 +00001170}
1171
1172/**
1173 * igb_map_ring_to_vector - maps allocated queues to vectors
1174 *
1175 * This function maps the recently allocated queues to vectors.
1176 **/
1177static int igb_map_ring_to_vector(struct igb_adapter *adapter)
1178{
1179 int i;
1180 int v_idx = 0;
1181
1182 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
1183 (adapter->num_q_vectors < adapter->num_tx_queues))
1184 return -ENOMEM;
1185
1186 if (adapter->num_q_vectors >=
1187 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1188 for (i = 0; i < adapter->num_rx_queues; i++)
1189 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1190 for (i = 0; i < adapter->num_tx_queues; i++)
1191 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1192 } else {
1193 for (i = 0; i < adapter->num_rx_queues; i++) {
1194 if (i < adapter->num_tx_queues)
1195 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1196 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1197 }
1198 for (; i < adapter->num_tx_queues; i++)
1199 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1200 }
1201 return 0;
1202}
1203
1204/**
1205 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1206 *
1207 * This function initializes the interrupts and allocates all of the queues.
1208 **/
1209static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1210{
1211 struct pci_dev *pdev = adapter->pdev;
1212 int err;
1213
Ben Hutchings21adef32010-09-27 08:28:39 +00001214 err = igb_set_interrupt_capability(adapter);
1215 if (err)
1216 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001217
1218 err = igb_alloc_q_vectors(adapter);
1219 if (err) {
1220 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1221 goto err_alloc_q_vectors;
1222 }
1223
1224 err = igb_alloc_queues(adapter);
1225 if (err) {
1226 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1227 goto err_alloc_queues;
1228 }
1229
1230 err = igb_map_ring_to_vector(adapter);
1231 if (err) {
1232 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1233 goto err_map_queues;
1234 }
1235
1236
1237 return 0;
1238err_map_queues:
1239 igb_free_queues(adapter);
1240err_alloc_queues:
1241 igb_free_q_vectors(adapter);
1242err_alloc_q_vectors:
1243 igb_reset_interrupt_capability(adapter);
1244 return err;
1245}
1246
1247/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001248 * igb_request_irq - initialize interrupts
1249 *
1250 * Attempts to configure interrupts using the best available
1251 * capabilities of the hardware and kernel.
1252 **/
1253static int igb_request_irq(struct igb_adapter *adapter)
1254{
1255 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001256 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001257 int err = 0;
1258
1259 if (adapter->msix_entries) {
1260 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001261 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001262 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001263 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001264 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001265 if (!pci_enable_msi(adapter->pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001266 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001267 igb_free_all_tx_resources(adapter);
1268 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001269 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001270 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001271 adapter->num_q_vectors = 1;
1272 err = igb_alloc_q_vectors(adapter);
1273 if (err) {
1274 dev_err(&pdev->dev,
1275 "Unable to allocate memory for vectors\n");
1276 goto request_done;
1277 }
1278 err = igb_alloc_queues(adapter);
1279 if (err) {
1280 dev_err(&pdev->dev,
1281 "Unable to allocate memory for queues\n");
1282 igb_free_q_vectors(adapter);
1283 goto request_done;
1284 }
1285 igb_setup_all_tx_resources(adapter);
1286 igb_setup_all_rx_resources(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001287 } else {
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001288 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001289 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001290
	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

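		/* the first vector was requested with the adapter itself
		 * as its cookie; the remaining vectors belong to the
		 * individual q_vectors */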
		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
				 q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * We need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers, so clearing bits they rely on can
	 * break the VF drivers; only clear the bits we set ourselves.
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
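	/* wait for any handlers already running to complete before the
	 * caller tears further state down */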
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
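		/* when VFs are allocated, also unmask the VF mailbox
		 * interrupts so the PF is notified of VF requests */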
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

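/**
 * igb_update_mng_vlan - update the manageability VLAN filter entry
 * @adapter: board private structure
 *
 * Mirrors the firmware DHCP cookie VLAN into the VLAN filter table and
 * removes the old entry once nothing else is using it.
 **/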
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

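/**
 * igb_down - Close the interface and stop Tx/Rx activity
 * @adapter: board private structure
 **/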
void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

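/**
 * igb_reinit_locked - restart the interface under the __IGB_RESETTING lock
 * @adapter: board private structure
 **/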
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

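/**
 * igb_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer, recomputes the flow control water
 * marks, and resets and re-initializes the hardware.
 **/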
void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
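		/* e.g. with a 1522-byte max frame: (1522 + 16 - 4) * 2 =
		 * 3068 bytes, aligned up to 3072 and shifted down to 3 KB */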
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");
	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/*
			 * The DMA Coalescing high water mark needs to be
			 * higher than the Rx threshold. The Rx threshold is
			 * currently pba - 6, so use a high water mark of
			 * pba - 4.
			 */
			hwm = (pba - 4) << 10;

			reg = (((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
			       & E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available.. */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer= +-1000 usec in 32usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable coalescing (smart
			 * fifo) - UTRESH=0 */
			wr32(E1000_DMCRTRH, 0);

			/* write the DMA Coalescing high water mark computed
			 * above */
			wr32(E1000_FCRTC, hwm);

			/*
			 * This sets the time to wait before requesting
			 * transition to low power state to the number of
			 * usecs needed to receive 1 512-byte frame at
			 * gigabit line rate.
			 */
			reg = rd32(E1000_DMCTLX);
			reg |= IGB_DMCTLX_DCFLUSH_DIS;

			/* Delay 255 usec before entering Lx state. */
			reg |= 0xFF;
			wr32(E1000_DMCTLX, reg);

			/* free space in Tx packet buffer to wake from DMAC */
			wr32(E1000_DMCTXTH,
			     (IGB_MIN_TXPBSIZE -
			      (IGB_TX_BUF_4096 + adapter->max_frame_size))
			     >> 6);

			/* make low power state decision controlled by DMAC */
			reg = rd32(E1000_PCIEMISC);
			reg |= E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* end if IGB_FLAG_DMAC set */
	}
	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static u32 igb_fix_features(struct net_device *netdev, u32 features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int igb_set_features(struct net_device *netdev, u32 features)
{
	u32 changed = netdev->features ^ features;

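	/* only the VLAN Rx offload bit requires touching the hardware here;
	 * the remaining feature bits are handled by the core netdev code */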
	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	return 0;
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_VLAN_RX;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter.
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	igb_vlan_mode(netdev, netdev->features);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		  "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		  "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
					   sizeof(struct vf_data_storage),
					   GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
				"Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;
		dev_info(&pdev->dev, "%d vfs allocated\n",
			 adapter->vfs_allocated_count);
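		/* give each VF a random MAC so its interface can come up
		 * before the administrator assigns a permanent one */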
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
		/* DMA Coalescing is not supported in IOV mode. */
		if (adapter->flags & IGB_FLAG_DMAC)
			adapter->flags &= ~IGB_FLAG_DMAC;
	}
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted. Instead we need to shift
		 * the registers to generate a 64bit timer value. As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits. As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->node = -1;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}
2604
2605/**
2606 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002607 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2608 *
2609 * Return 0 on success, negative on failure
2610 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002611int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002612{
Alexander Duyck59d71982010-04-27 13:09:25 +00002613 struct device *dev = tx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002614 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002615 int size;
2616
Alexander Duyck06034642011-08-26 07:44:22 +00002617 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002618 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2619 if (!tx_ring->tx_buffer_info)
2620 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002621 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002622 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002623
2624 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002625 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002626 tx_ring->size = ALIGN(tx_ring->size, 4096);
2627
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002628 set_dev_node(dev, tx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002629 tx_ring->desc = dma_alloc_coherent(dev,
2630 tx_ring->size,
2631 &tx_ring->dma,
2632 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002633 set_dev_node(dev, orig_node);
2634 if (!tx_ring->desc)
2635 tx_ring->desc = dma_alloc_coherent(dev,
2636 tx_ring->size,
2637 &tx_ring->dma,
2638 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002639
2640 if (!tx_ring->desc)
2641 goto err;
2642
Auke Kok9d5c8242008-01-24 02:22:38 -08002643 tx_ring->next_to_use = 0;
2644 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002645
Auke Kok9d5c8242008-01-24 02:22:38 -08002646 return 0;
2647
2648err:
Alexander Duyck06034642011-08-26 07:44:22 +00002649 vfree(tx_ring->tx_buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002650 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002651 "Unable to allocate memory for the transmit descriptor ring\n");
2652 return -ENOMEM;
2653}
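/*
 * A note on the allocation pattern above: both the buffer_info array and
 * the descriptor ring are requested from the ring's preferred NUMA node
 * first, falling back to any node rather than failing the open.  A
 * minimal standalone sketch of the same idea (example_alloc() and its
 * parameters are illustrative only, not part of this driver):
 *
 *	static void *example_alloc(unsigned long size, int preferred_node)
 *	{
 *		void *mem = vzalloc_node(size, preferred_node);
 *
 *		if (!mem)
 *			mem = vzalloc(size);	// any node beats failing
 *		return mem;
 *	}
 */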

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				 (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
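/*
 * The TXDCTL write above packs three prefetch/write-back thresholds into
 * byte-aligned fields (PTHRESH at bit 0, HTHRESH at bit 8, WTHRESH at
 * bit 16) before setting the queue-enable bit.  A worked example,
 * assuming for illustration PTHRESH = 8, HTHRESH = 1 and WTHRESH = 16
 * (the real values come from the IGB_TX_*THRESH defines in igb.h):
 *
 *	u32 txdctl = 8 | (1 << 8) | (16 << 16);	// 0x00100108
 *
 *	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;	// enable on top of thresholds
 */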

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size, desc_len;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, rx_ring->numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev,
						   rx_ring->size,
						   &rx_ring->dma,
						   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				 (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}
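/*
 * The redirection table above holds 128 single-byte queue indices,
 * written four at a time through the bytes/dword union.  A worked
 * example with illustrative values: with num_rx_queues = 4 and
 * shift = 0, j % num_rx_queues yields 0, 1, 2, 3, 0, 1, ... so the
 * first register write is equivalent to
 *
 *	reta.bytes[0] = 0; reta.bytes[1] = 1;
 *	reta.bytes[2] = 2; reta.bytes[3] = 3;
 *	wr32(E1000_RETA(0), reta.dword);	// 0x03020100 on little-endian
 *
 * and incoming flows are then spread round-robin across the four queues
 * by the low bits of their RSS hash.
 */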

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
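/*
 * For reference, the length RLPML is compared against is the on-wire
 * frame, so max_frame_size is the MTU plus layer-2 overhead.  A sketch
 * of the usual arithmetic (the driver computes adapter->max_frame_size
 * elsewhere; the figures here are only the standard Ethernet case):
 *
 *	max_frame_size = mtu + ETH_HLEN + ETH_FCS_LEN;	// 1500 + 14 + 4 = 1518
 *
 * A VLAN tag adds another 4 bytes (1522), which is why
 * igb_set_vf_rlpml() bumps the per-VF limit by VLAN_TAG_SIZE when that
 * VF has VLANs enabled.
 */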

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE);     /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	if (hw->mac.type == e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
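/*
 * The SRRCTL sizing above is easy to misread because the two buffer-size
 * fields use different units: the packet buffer field counts 1 KB chunks
 * (hence the >> E1000_SRRCTL_BSIZEPKT_SHIFT), while the header buffer
 * field counts 64-byte chunks placed higher in the register (hence the
 * << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT).  A worked example, assuming 4 KB
 * pages and this era's 512-byte IGB_RX_HDR_LEN (check igb.h for the
 * authoritative values):
 *
 *	(PAGE_SIZE / 2) >> 10 == 2048 >> 10 == 2	// 2 x 1 KB packet buffer
 *	512 << 2 == 0x800				// 512/64 = 8, at bit 8
 *
 * so each descriptor advertises a 512-byte header buffer plus a
 * half-page packet buffer for header-split receives.
 */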

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	} else if (tx_buffer->dma) {
		dma_unmap_page(ring->dev,
			       tx_buffer->dma,
			       tx_buffer->length,
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	tx_buffer->dma = 0;
	/* buffer_info must be completely set up in the transmit path */
}
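/*
 * The skb test above is what distinguishes the two unmap calls: only the
 * buffer holding the skb head was mapped with dma_map_single(), while
 * fragment buffers were mapped page-wise, and the unmap flavor has to
 * mirror the map flavor.  Roughly, the transmit path pairs up as follows
 * (simplified sketch, not the literal xmit code):
 *
 *	buf->dma = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *	buf->dma = dma_map_page(dev, frag_page, off, len, DMA_TO_DEVICE);
 *
 * Zeroing skb/dma/next_to_watch afterwards keeps the closing comment's
 * contract: the transmit path may assume a fully cleared buffer_info.
 */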

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}
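/*
 * "Packed array" above means six address bytes per entry, back to back,
 * with no padding or metadata: entry i lives at mta_list + i * ETH_ALEN,
 * so three multicast addresses occupy exactly 18 bytes.  An equivalent
 * way to write the copy loop (sketch only):
 *
 *	u8 *dst = mta_list;
 *
 *	netdev_for_each_mc_addr(ha, netdev) {
 *		memcpy(dst, ha->addr, ETH_ALEN);
 *		dst += ETH_ALEN;
 *	}
 */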

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}
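/*
 * The rar_entries arithmetic reserves one receive-address register per
 * VF plus the default entry before counting what is left for extra
 * unicast filters.  For example (illustrative numbers only): a MAC
 * exposing 24 RAR entries with 7 VFs allocated leaves
 *
 *	rar_entries = 24 - (7 + 1) = 16
 *
 * usable slots, filled from the top of that range downwards while the
 * unused slots below them are explicitly zeroed.
 */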

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}
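/*
 * The staggered-offset masking above reflects how the WVBR register
 * reports spoofed transmissions: each VF owns two bits, one in the low
 * bank and one IGB_STAGGERED_QUEUE_OFFSET (8) bits higher, roughly one
 * per transmit queue pair.  Worked example for VF 2:
 *
 *	mask = (1 << 2) | (1 << (2 + 8));	// 0x00000404
 *
 * A set bit in either position triggers the warning, and both bits are
 * cleared so the event is only reported once.
 */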

/*
 * Need to wait a few seconds after link up to get diagnostic information
 * from the phy.
 */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
				printk(KERN_INFO "igb: %s The network adapter "
				       "link speed was downshifted "
				       "because it overheated.\n",
				       netdev->name);
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
				printk(KERN_ERR "igb: %s The network adapter "
				       "was stopped because it "
				       "overheated.\n",
				       netdev->name);
			}

			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		tx_ring->detect_tx_hung = true;
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}
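/*
 * To make the divisor logic above concrete, two worked examples
 * (illustrative traffic mixes, arithmetic taken straight from the code):
 *
 *	1500-byte frames: avg_wire_size = 1500 + 24 = 1524
 *	                  outside (300, 1200), so new_val = 1524 / 2 = 762
 *	 600-byte frames: avg_wire_size =  600 + 24 =  624
 *	                  mid-size boost,      so new_val =  624 / 3 = 208
 *
 * Larger values mean a longer interval between interrupts, so bulky
 * traffic coalesces harder while smaller, latency-sensitive frames keep
 * a lower ITR (subject to the IGB_20K_ITR floor in conservative mode).
 */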
3862
3863/**
3864 * igb_update_itr - update the dynamic ITR value based on statistics
3865 * Stores a new ITR value based on packets and byte
3866 * counts during the last interrupt. The advantage of per interrupt
3867 * computation is faster updates and more accurate ITR for the current
3868 * traffic pattern. Constants in this function were computed
3869 * based on theoretical maximum wire speed and thresholds were set based
3870 * on testing data as well as attempting to minimize response time
3871 * while increasing bulk throughput.
3872 * this functionality is controlled by the InterruptThrottleRate module
3873 * parameter (see igb_param.c)
3874 * NOTE: These calculations are only valid when operating in a single-
3875 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003876 * @q_vector: pointer to q_vector
3877 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003878 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003879static void igb_update_itr(struct igb_q_vector *q_vector,
3880 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003881{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003882 unsigned int packets = ring_container->total_packets;
3883 unsigned int bytes = ring_container->total_bytes;
3884 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003885
Alexander Duyck0ba82992011-08-26 07:45:47 +00003886 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003887 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003888 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003889
Alexander Duyck0ba82992011-08-26 07:45:47 +00003890 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003891 case lowest_latency:
3892 /* handle TSO and jumbo frames */
3893 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003894 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003895 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003896 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003897 break;
3898 case low_latency: /* 50 usec aka 20000 ints/s */
3899 if (bytes > 10000) {
3900 /* this if handles the TSO accounting */
3901 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003902 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003903 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003904 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003905 } else if ((packets > 35)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003906 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003907 }
3908 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003909 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003910 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003911 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003912 }
3913 break;
3914 case bulk_latency: /* 250 usec aka 4000 ints/s */
3915 if (bytes > 25000) {
3916 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003917 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003918 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003919 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003920 }
3921 break;
3922 }
3923
Alexander Duyck0ba82992011-08-26 07:45:47 +00003924 /* clear work counters since we have the values we need */
3925 ring_container->total_bytes = 0;
3926 ring_container->total_packets = 0;
3927
3928 /* write updated itr to ring container */
3929 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003930}
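
/* Illustrative walk-through (not part of the original source): with itr at
 * low_latency, an interrupt that cleaned 40 packets / 20000 bytes satisfies
 * bytes > 10000 with packets > 35, so itr rises to lowest_latency, while one
 * that cleaned 4 packets / 24000 bytes (packets < 10) falls to bulk_latency. */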
3931
Alexander Duyck0ba82992011-08-26 07:45:47 +00003932static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003933{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003934 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00003935 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003936 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003937
3938 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3939 if (adapter->link_speed != SPEED_1000) {
3940 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003941 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08003942 goto set_itr_now;
3943 }
3944
Alexander Duyck0ba82992011-08-26 07:45:47 +00003945 igb_update_itr(q_vector, &q_vector->tx);
3946 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08003947
Alexander Duyck0ba82992011-08-26 07:45:47 +00003948 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003949
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003950 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00003951 if (current_itr == lowest_latency &&
3952 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3953 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003954 current_itr = low_latency;
3955
Auke Kok9d5c8242008-01-24 02:22:38 -08003956 switch (current_itr) {
3957 /* counts and packets in update_itr are dependent on these numbers */
3958 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003959 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003960 break;
3961 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003962 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003963 break;
3964 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003965 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003966 break;
3967 default:
3968 break;
3969 }
3970
3971set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00003972 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003973 /* this attempts to bias the interrupt rate towards Bulk
3974 * by adding intermediate steps when interrupt rate is
3975 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003976 new_itr = new_itr > q_vector->itr_val ?
3977 max((new_itr * q_vector->itr_val) /
3978 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00003979 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003980 new_itr;
3981 /* Don't write the value here; it resets the adapter's
3982 * internal timer, and causes us to delay far longer than
3983 * we should between interrupts. Instead, we write the ITR
3984 * value at the beginning of the next interrupt so the timing
3985 * ends up being correct.
3986 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003987 q_vector->itr_val = new_itr;
3988 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003989 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003990}
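
/* Worked example of the scaling above (illustrative): stepping from
 * IGB_70K_ITR (56) toward IGB_4K_ITR (980) computes (980 * 56) / (980 + 14),
 * roughly 55, so the max() keeps the larger target of 980 in that direction. */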
3991
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003992void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
3993 u32 type_tucmd, u32 mss_l4len_idx)
3994{
3995 struct e1000_adv_tx_context_desc *context_desc;
3996 u16 i = tx_ring->next_to_use;
3997
3998 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
3999
4000 i++;
4001 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4002
4003 /* set bits to identify this as an advanced context descriptor */
4004 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4005
4006 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004007 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004008 mss_l4len_idx |= tx_ring->reg_idx << 4;
4009
4010 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4011 context_desc->seqnum_seed = 0;
4012 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4013 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4014}
4015
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004016static int igb_tso(struct igb_ring *tx_ring,
4017 struct igb_tx_buffer *first,
4018 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004019{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004020 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004021 u32 vlan_macip_lens, type_tucmd;
4022 u32 mss_l4len_idx, l4len;
4023
4024 if (!skb_is_gso(skb))
4025 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004026
4027 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004028 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004029 if (err)
4030 return err;
4031 }
4032
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004033 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4034 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004035
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004036 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004037 struct iphdr *iph = ip_hdr(skb);
4038 iph->tot_len = 0;
4039 iph->check = 0;
4040 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4041 iph->daddr, 0,
4042 IPPROTO_TCP,
4043 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004044 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004045 first->tx_flags |= IGB_TX_FLAGS_TSO |
4046 IGB_TX_FLAGS_CSUM |
4047 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004048 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004049 ipv6_hdr(skb)->payload_len = 0;
4050 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4051 &ipv6_hdr(skb)->daddr,
4052 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004053 first->tx_flags |= IGB_TX_FLAGS_TSO |
4054 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004055 }
4056
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004057 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004058 l4len = tcp_hdrlen(skb);
4059 *hdr_len = skb_transport_offset(skb) + l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08004060
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004061 /* update gso size and bytecount with header size */
4062 first->gso_segs = skb_shinfo(skb)->gso_segs;
4063 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4064
Auke Kok9d5c8242008-01-24 02:22:38 -08004065 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004066 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4067 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004068
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004069 /* VLAN MACLEN IPLEN */
4070 vlan_macip_lens = skb_network_header_len(skb);
4071 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004072 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004073
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004074 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004075
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004076 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004077}
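
/* Illustrative accounting (not in the original source): an IPv4 TCP TSO skb
 * with 14-byte MAC, 20-byte IP and 20-byte TCP headers gives l4len = 20 and
 * *hdr_len = 34 + 20 = 54, so first->bytecount above grows by 54 bytes for
 * every segment after the first. */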
4078
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004079static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004080{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004081 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004082 u32 vlan_macip_lens = 0;
4083 u32 mss_l4len_idx = 0;
4084 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004085
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004086 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004087 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4088 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004089 } else {
4090 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004091 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004092 case __constant_htons(ETH_P_IP):
4093 vlan_macip_lens |= skb_network_header_len(skb);
4094 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4095 l4_hdr = ip_hdr(skb)->protocol;
4096 break;
4097 case __constant_htons(ETH_P_IPV6):
4098 vlan_macip_lens |= skb_network_header_len(skb);
4099 l4_hdr = ipv6_hdr(skb)->nexthdr;
4100 break;
4101 default:
4102 if (unlikely(net_ratelimit())) {
4103 dev_warn(tx_ring->dev,
4104 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004105 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004106 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004107 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004108 }
4109
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004110 switch (l4_hdr) {
4111 case IPPROTO_TCP:
4112 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4113 mss_l4len_idx = tcp_hdrlen(skb) <<
4114 E1000_ADVTXD_L4LEN_SHIFT;
4115 break;
4116 case IPPROTO_SCTP:
4117 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4118 mss_l4len_idx = sizeof(struct sctphdr) <<
4119 E1000_ADVTXD_L4LEN_SHIFT;
4120 break;
4121 case IPPROTO_UDP:
4122 mss_l4len_idx = sizeof(struct udphdr) <<
4123 E1000_ADVTXD_L4LEN_SHIFT;
4124 break;
4125 default:
4126 if (unlikely(net_ratelimit())) {
4127 dev_warn(tx_ring->dev,
4128 "partial checksum but l4 proto=%x!\n",
4129 l4_hdr);
4130 }
4131 break;
4132 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004133
4134 /* update TX checksum flag */
4135 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004136 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004137
4138 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004139 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004140
4141 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004142}
4143
Alexander Duycke032afc2011-08-26 07:44:48 +00004144static __le32 igb_tx_cmd_type(u32 tx_flags)
4145{
4146 /* set type for advanced descriptor with frame checksum insertion */
4147 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4148 E1000_ADVTXD_DCMD_IFCS |
4149 E1000_ADVTXD_DCMD_DEXT);
4150
4151 /* set HW vlan bit if vlan is present */
4152 if (tx_flags & IGB_TX_FLAGS_VLAN)
4153 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4154
4155 /* set timestamp bit if present */
4156 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4157 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4158
4159 /* set segmentation bits for TSO */
4160 if (tx_flags & IGB_TX_FLAGS_TSO)
4161 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4162
4163 return cmd_type;
4164}
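
/* Illustrative: a VLAN-tagged TSO frame yields DTYP_DATA | IFCS | DEXT | VLE
 * | TSE, while an untagged frame with no offloads keeps just the base bits. */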
4165
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004166static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4167 union e1000_adv_tx_desc *tx_desc,
4168 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004169{
4170 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4171
4172 /* 82575 requires a unique index per ring if any offload is enabled */
4173 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
Alexander Duyck866cff02011-08-26 07:45:36 +00004174 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004175 olinfo_status |= tx_ring->reg_idx << 4;
4176
4177 /* insert L4 checksum */
4178 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4179 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4180
4181 /* insert IPv4 checksum */
4182 if (tx_flags & IGB_TX_FLAGS_IPV4)
4183 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4184 }
4185
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004186 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004187}
4188
Alexander Duyckebe42d12011-08-26 07:45:09 +00004189/*
4190 * The largest size we can write to the descriptor is 65535. In order to
4191 * maintain a power of two alignment we have to limit ourselves to 32K.
4192 */
4193#define IGB_MAX_TXD_PWR 15
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004194#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
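/* Example (illustrative): a 45000-byte linear buffer is emitted by the loop
 * in igb_tx_map() as one 32768-byte descriptor plus one 12232-byte one. */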
Auke Kok9d5c8242008-01-24 02:22:38 -08004195
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004196static void igb_tx_map(struct igb_ring *tx_ring,
4197 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004198 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004199{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004200 struct sk_buff *skb = first->skb;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004201 struct igb_tx_buffer *tx_buffer_info;
4202 union e1000_adv_tx_desc *tx_desc;
4203 dma_addr_t dma;
4204 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4205 unsigned int data_len = skb->data_len;
4206 unsigned int size = skb_headlen(skb);
4207 unsigned int paylen = skb->len - hdr_len;
4208 __le32 cmd_type;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004209 u32 tx_flags = first->tx_flags;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004210 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004211
4212 tx_desc = IGB_TX_DESC(tx_ring, i);
4213
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004214 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004215 cmd_type = igb_tx_cmd_type(tx_flags);
4216
4217 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4218 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004219 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004220
Alexander Duyckebe42d12011-08-26 07:45:09 +00004221 /* record length, and DMA address */
4222 first->length = size;
4223 first->dma = dma;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004224 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004225
Alexander Duyckebe42d12011-08-26 07:45:09 +00004226 for (;;) {
4227 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4228 tx_desc->read.cmd_type_len =
4229 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004230
Alexander Duyckebe42d12011-08-26 07:45:09 +00004231 i++;
4232 tx_desc++;
4233 if (i == tx_ring->count) {
4234 tx_desc = IGB_TX_DESC(tx_ring, 0);
4235 i = 0;
4236 }
4237
4238 dma += IGB_MAX_DATA_PER_TXD;
4239 size -= IGB_MAX_DATA_PER_TXD;
4240
4241 tx_desc->read.olinfo_status = 0;
4242 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4243 }
4244
4245 if (likely(!data_len))
4246 break;
4247
4248 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4249
Alexander Duyck65689fe2009-03-20 00:17:43 +00004250 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004251 tx_desc++;
4252 if (i == tx_ring->count) {
4253 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004254 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004255 }
Alexander Duyck65689fe2009-03-20 00:17:43 +00004256
Alexander Duyckebe42d12011-08-26 07:45:09 +00004257 size = frag->size;
4258 data_len -= size;
4259
4260 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4261 size, DMA_TO_DEVICE);
4262 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004263 goto dma_error;
4264
Alexander Duyckebe42d12011-08-26 07:45:09 +00004265 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4266 tx_buffer_info->length = size;
4267 tx_buffer_info->dma = dma;
4268
4269 tx_desc->read.olinfo_status = 0;
4270 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4271
4272 frag++;
Auke Kok9d5c8242008-01-24 02:22:38 -08004273 }
4274
Alexander Duyckebe42d12011-08-26 07:45:09 +00004275 /* write last descriptor with RS and EOP bits */
4276 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
4277 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyck8542db02011-08-26 07:44:43 +00004278
4279 /* set the timestamp */
4280 first->time_stamp = jiffies;
4281
Alexander Duyckebe42d12011-08-26 07:45:09 +00004282 /*
4283 * Force memory writes to complete before letting h/w know there
4284 * are new descriptors to fetch. (Only applicable for weak-ordered
4285 * memory model archs, such as IA-64).
4286 *
4287 * We also need this memory barrier to make certain all of the
4288 * status bits have been updated before next_to_watch is written.
4289 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004290 wmb();
4291
Alexander Duyckebe42d12011-08-26 07:45:09 +00004292 /* set next_to_watch value indicating a packet is present */
4293 first->next_to_watch = tx_desc;
4294
4295 i++;
4296 if (i == tx_ring->count)
4297 i = 0;
4298
Auke Kok9d5c8242008-01-24 02:22:38 -08004299 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004300
Alexander Duyckfce99e32009-10-27 15:51:27 +00004301 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004302
Auke Kok9d5c8242008-01-24 02:22:38 -08004303 /* we need this if more than one processor can write to our tail
4304 * at a time; it synchronizes IO on IA64/Altix systems */
4305 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004306
4307 return;
4308
4309dma_error:
4310 dev_err(tx_ring->dev, "TX DMA map failed\n");
4311
4312 /* clear dma mappings for failed tx_buffer_info map */
4313 for (;;) {
4314 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4315 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4316 if (tx_buffer_info == first)
4317 break;
4318 if (i == 0)
4319 i = tx_ring->count;
4320 i--;
4321 }
4322
4323 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004324}
4325
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004326static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004327{
Alexander Duycke694e962009-10-27 15:53:06 +00004328 struct net_device *netdev = tx_ring->netdev;
4329
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004330 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004331
Auke Kok9d5c8242008-01-24 02:22:38 -08004332 /* Herbert's original patch had:
4333 * smp_mb__after_netif_stop_queue();
4334 * but since that doesn't exist yet, just open code it. */
4335 smp_mb();
4336
4337 /* We need to check again in a case another CPU has just
4338 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004339 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004340 return -EBUSY;
4341
4342 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004343 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004344
4345 u64_stats_update_begin(&tx_ring->tx_syncp2);
4346 tx_ring->tx_stats.restart_queue2++;
4347 u64_stats_update_end(&tx_ring->tx_syncp2);
4348
Auke Kok9d5c8242008-01-24 02:22:38 -08004349 return 0;
4350}
4351
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004352static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004353{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004354 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004355 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004356 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004357}
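
/* Illustrative: igb_xmit_frame_ring() reserves nr_frags + 4 descriptors, so a
 * 3-fragment skb proceeds only if igb_desc_unused() reports at least 7 free
 * entries; otherwise the subqueue is stopped and rechecked after the barrier
 * in __igb_maybe_stop_tx(). */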
4358
Alexander Duyckcd392f52011-08-26 07:43:59 +00004359netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4360 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004361{
Alexander Duyck8542db02011-08-26 07:44:43 +00004362 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004363 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004364 u32 tx_flags = 0;
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004365 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004366 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004367
Auke Kok9d5c8242008-01-24 02:22:38 -08004368 /* need: 1 descriptor per page,
4369 * + 2 desc gap to keep tail from touching head,
4370 * + 1 desc for skb->data,
4371 * + 1 desc for context descriptor,
4372 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004373 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004374 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004375 return NETDEV_TX_BUSY;
4376 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004377
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004378 /* record the location of the first descriptor for this packet */
4379 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4380 first->skb = skb;
4381 first->bytecount = skb->len;
4382 first->gso_segs = 1;
4383
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004384 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4385 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004386 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004387 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004388
Jesse Grosseab6d182010-10-20 13:56:03 +00004389 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004390 tx_flags |= IGB_TX_FLAGS_VLAN;
4391 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4392 }
4393
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004394 /* record initial flags and protocol */
4395 first->tx_flags = tx_flags;
4396 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004397
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004398 tso = igb_tso(tx_ring, first, &hdr_len);
4399 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004400 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004401 else if (!tso)
4402 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004403
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004404 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004405
4406 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004407 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004408
Auke Kok9d5c8242008-01-24 02:22:38 -08004409 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004410
4411out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004412 igb_unmap_and_free_tx_resource(tx_ring, first);
4413
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004414 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004415}
4416
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004417static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4418 struct sk_buff *skb)
4419{
4420 unsigned int r_idx = skb->queue_mapping;
4421
4422 if (r_idx >= adapter->num_tx_queues)
4423 r_idx = r_idx % adapter->num_tx_queues;
4424
4425 return adapter->tx_ring[r_idx];
4426}
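
/* Illustrative: with num_tx_queues = 4, an skb carrying queue_mapping = 5 is
 * folded onto tx_ring[1] by the modulo above. */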
4427
Alexander Duyckcd392f52011-08-26 07:43:59 +00004428static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4429 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004430{
4431 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004432
4433 if (test_bit(__IGB_DOWN, &adapter->state)) {
4434 dev_kfree_skb_any(skb);
4435 return NETDEV_TX_OK;
4436 }
4437
4438 if (skb->len <= 0) {
4439 dev_kfree_skb_any(skb);
4440 return NETDEV_TX_OK;
4441 }
4442
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004443 /*
4444 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4445 * in order to meet this minimum size requirement.
4446 */
4447 if (skb->len < 17) {
4448 if (skb_padto(skb, 17))
4449 return NETDEV_TX_OK;
4450 skb->len = 17;
4451 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004452
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004453 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004454}
4455
4456/**
4457 * igb_tx_timeout - Respond to a Tx Hang
4458 * @netdev: network interface device structure
4459 **/
4460static void igb_tx_timeout(struct net_device *netdev)
4461{
4462 struct igb_adapter *adapter = netdev_priv(netdev);
4463 struct e1000_hw *hw = &adapter->hw;
4464
4465 /* Do the reset outside of interrupt context */
4466 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004467
Alexander Duyck55cac242009-11-19 12:42:21 +00004468 if (hw->mac.type == e1000_82580)
4469 hw->dev_spec._82575.global_device_reset = true;
4470
Auke Kok9d5c8242008-01-24 02:22:38 -08004471 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004472 wr32(E1000_EICS,
4473 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004474}
4475
4476static void igb_reset_task(struct work_struct *work)
4477{
4478 struct igb_adapter *adapter;
4479 adapter = container_of(work, struct igb_adapter, reset_task);
4480
Taku Izumic97ec422010-04-27 14:39:30 +00004481 igb_dump(adapter);
4482 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004483 igb_reinit_locked(adapter);
4484}
4485
4486/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004487 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004488 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004489 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004490 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004491 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004492static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4493 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004494{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004495 struct igb_adapter *adapter = netdev_priv(netdev);
4496
4497 spin_lock(&adapter->stats64_lock);
4498 igb_update_stats(adapter, &adapter->stats64);
4499 memcpy(stats, &adapter->stats64, sizeof(*stats));
4500 spin_unlock(&adapter->stats64_lock);
4501
4502 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004503}
4504
4505/**
4506 * igb_change_mtu - Change the Maximum Transfer Unit
4507 * @netdev: network interface device structure
4508 * @new_mtu: new value for maximum frame size
4509 *
4510 * Returns 0 on success, negative on failure
4511 **/
4512static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4513{
4514 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004515 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004516 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004517
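	/* Example (illustrative): the default MTU of 1500 gives max_frame =
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522. */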
Alexander Duyckc809d222009-10-27 23:52:13 +00004518 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004519 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004520 return -EINVAL;
4521 }
4522
Alexander Duyck153285f2011-08-26 07:43:32 +00004523#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004524 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004525 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004526 return -EINVAL;
4527 }
4528
4529 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4530 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004531
Auke Kok9d5c8242008-01-24 02:22:38 -08004532 /* igb_down has a dependency on max_frame_size */
4533 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004534
Alexander Duyck4c844852009-10-27 15:52:07 +00004535 if (netif_running(netdev))
4536 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004537
Alexander Duyck090b1792009-10-27 23:51:55 +00004538 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004539 netdev->mtu, new_mtu);
4540 netdev->mtu = new_mtu;
4541
4542 if (netif_running(netdev))
4543 igb_up(adapter);
4544 else
4545 igb_reset(adapter);
4546
4547 clear_bit(__IGB_RESETTING, &adapter->state);
4548
4549 return 0;
4550}
4551
4552/**
4553 * igb_update_stats - Update the board statistics counters
4554 * @adapter: board private structure
4555 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004557void igb_update_stats(struct igb_adapter *adapter,
4558 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004559{
4560 struct e1000_hw *hw = &adapter->hw;
4561 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004562 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004563 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004564 int i;
4565 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004566 unsigned int start;
4567 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004568
4569#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4570
4571 /*
4572 * Prevent stats update while adapter is being reset, or if the pci
4573 * connection is down.
4574 */
4575 if (adapter->link_speed == 0)
4576 return;
4577 if (pci_channel_offline(pdev))
4578 return;
4579
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004580 bytes = 0;
4581 packets = 0;
4582 for (i = 0; i < adapter->num_rx_queues; i++) {
4583 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00004584 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004585
Alexander Duyck3025a442010-02-17 01:02:39 +00004586 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004587 net_stats->rx_fifo_errors += rqdpc_tmp;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004588
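		/* the fetch/retry loop below re-reads the counters whenever a
		 * writer bumps the ring's syncp sequence mid-read, keeping the
		 * 64-bit stats consistent on 32-bit hosts without a lock */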
4589 do {
4590 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4591 _bytes = ring->rx_stats.bytes;
4592 _packets = ring->rx_stats.packets;
4593 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4594 bytes += _bytes;
4595 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004596 }
4597
Alexander Duyck128e45e2009-11-12 18:37:38 +00004598 net_stats->rx_bytes = bytes;
4599 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004600
4601 bytes = 0;
4602 packets = 0;
4603 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004604 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004605 do {
4606 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4607 _bytes = ring->tx_stats.bytes;
4608 _packets = ring->tx_stats.packets;
4609 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4610 bytes += _bytes;
4611 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004612 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004613 net_stats->tx_bytes = bytes;
4614 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004615
4616 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004617 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4618 adapter->stats.gprc += rd32(E1000_GPRC);
4619 adapter->stats.gorc += rd32(E1000_GORCL);
4620 rd32(E1000_GORCH); /* clear GORCL */
4621 adapter->stats.bprc += rd32(E1000_BPRC);
4622 adapter->stats.mprc += rd32(E1000_MPRC);
4623 adapter->stats.roc += rd32(E1000_ROC);
4624
4625 adapter->stats.prc64 += rd32(E1000_PRC64);
4626 adapter->stats.prc127 += rd32(E1000_PRC127);
4627 adapter->stats.prc255 += rd32(E1000_PRC255);
4628 adapter->stats.prc511 += rd32(E1000_PRC511);
4629 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4630 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4631 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4632 adapter->stats.sec += rd32(E1000_SEC);
4633
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004634 mpc = rd32(E1000_MPC);
4635 adapter->stats.mpc += mpc;
4636 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004637 adapter->stats.scc += rd32(E1000_SCC);
4638 adapter->stats.ecol += rd32(E1000_ECOL);
4639 adapter->stats.mcc += rd32(E1000_MCC);
4640 adapter->stats.latecol += rd32(E1000_LATECOL);
4641 adapter->stats.dc += rd32(E1000_DC);
4642 adapter->stats.rlec += rd32(E1000_RLEC);
4643 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4644 adapter->stats.xontxc += rd32(E1000_XONTXC);
4645 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4646 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4647 adapter->stats.fcruc += rd32(E1000_FCRUC);
4648 adapter->stats.gptc += rd32(E1000_GPTC);
4649 adapter->stats.gotc += rd32(E1000_GOTCL);
4650 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004651 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004652 adapter->stats.ruc += rd32(E1000_RUC);
4653 adapter->stats.rfc += rd32(E1000_RFC);
4654 adapter->stats.rjc += rd32(E1000_RJC);
4655 adapter->stats.tor += rd32(E1000_TORH);
4656 adapter->stats.tot += rd32(E1000_TOTH);
4657 adapter->stats.tpr += rd32(E1000_TPR);
4658
4659 adapter->stats.ptc64 += rd32(E1000_PTC64);
4660 adapter->stats.ptc127 += rd32(E1000_PTC127);
4661 adapter->stats.ptc255 += rd32(E1000_PTC255);
4662 adapter->stats.ptc511 += rd32(E1000_PTC511);
4663 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4664 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4665
4666 adapter->stats.mptc += rd32(E1000_MPTC);
4667 adapter->stats.bptc += rd32(E1000_BPTC);
4668
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004669 adapter->stats.tpt += rd32(E1000_TPT);
4670 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004671
4672 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004673 /* read internal phy specific stats */
4674 reg = rd32(E1000_CTRL_EXT);
4675 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4676 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4677 adapter->stats.tncrs += rd32(E1000_TNCRS);
4678 }
4679
Auke Kok9d5c8242008-01-24 02:22:38 -08004680 adapter->stats.tsctc += rd32(E1000_TSCTC);
4681 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4682
4683 adapter->stats.iac += rd32(E1000_IAC);
4684 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4685 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4686 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4687 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4688 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4689 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4690 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4691 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4692
4693 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004694 net_stats->multicast = adapter->stats.mprc;
4695 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004696
4697 /* Rx Errors */
4698
4699 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004700 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004701 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004702 adapter->stats.crcerrs + adapter->stats.algnerrc +
4703 adapter->stats.ruc + adapter->stats.roc +
4704 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004705 net_stats->rx_length_errors = adapter->stats.ruc +
4706 adapter->stats.roc;
4707 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4708 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4709 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004710
4711 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004712 net_stats->tx_errors = adapter->stats.ecol +
4713 adapter->stats.latecol;
4714 net_stats->tx_aborted_errors = adapter->stats.ecol;
4715 net_stats->tx_window_errors = adapter->stats.latecol;
4716 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004717
4718 /* Tx Dropped needs to be maintained elsewhere */
4719
4720 /* Phy Stats */
4721 if (hw->phy.media_type == e1000_media_type_copper) {
4722 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004723 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004724 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4725 adapter->phy_stats.idle_errors += phy_tmp;
4726 }
4727 }
4728
4729 /* Management Stats */
4730 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4731 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4732 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004733
4734 /* OS2BMC Stats */
4735 reg = rd32(E1000_MANC);
4736 if (reg & E1000_MANC_EN_BMC2OS) {
4737 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4738 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4739 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4740 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4741 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004742}
4743
Auke Kok9d5c8242008-01-24 02:22:38 -08004744static irqreturn_t igb_msix_other(int irq, void *data)
4745{
Alexander Duyck047e0032009-10-27 15:49:27 +00004746 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004747 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004748 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004749 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004750
Alexander Duyck7f081d42010-01-07 17:41:00 +00004751 if (icr & E1000_ICR_DRSTA)
4752 schedule_work(&adapter->reset_task);
4753
Alexander Duyck047e0032009-10-27 15:49:27 +00004754 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004755 /* HW is reporting DMA is out of sync */
4756 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004757 /* The DMA Out of Sync is also an indication of a spoof event
4758 * in IOV mode. Check the Wrong VM Behavior register to
4759 * see if it is really a spoof event. */
4760 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004761 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004762
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004763 /* Check for a mailbox event */
4764 if (icr & E1000_ICR_VMMB)
4765 igb_msg_task(adapter);
4766
4767 if (icr & E1000_ICR_LSC) {
4768 hw->mac.get_link_status = 1;
4769 /* guard against interrupt when we're going down */
4770 if (!test_bit(__IGB_DOWN, &adapter->state))
4771 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4772 }
4773
Alexander Duyck25568a52009-10-27 23:49:59 +00004774 if (adapter->vfs_allocated_count)
4775 wr32(E1000_IMS, E1000_IMS_LSC |
4776 E1000_IMS_VMMB |
4777 E1000_IMS_DOUTSYNC);
4778 else
4779 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004780 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004781
4782 return IRQ_HANDLED;
4783}
4784
Alexander Duyck047e0032009-10-27 15:49:27 +00004785static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004786{
Alexander Duyck26b39272010-02-17 01:00:41 +00004787 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004788 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004789
Alexander Duyck047e0032009-10-27 15:49:27 +00004790 if (!q_vector->set_itr)
4791 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004792
Alexander Duyck047e0032009-10-27 15:49:27 +00004793 if (!itr_val)
4794 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004795
Alexander Duyck26b39272010-02-17 01:00:41 +00004796 if (adapter->hw.mac.type == e1000_82575)
4797 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004798 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004799 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004800
4801 writel(itr_val, q_vector->itr_register);
4802 q_vector->set_itr = 0;
4803}
4804
4805static irqreturn_t igb_msix_ring(int irq, void *data)
4806{
4807 struct igb_q_vector *q_vector = data;
4808
4809 /* Write the ITR value calculated from the previous interrupt. */
4810 igb_write_itr(q_vector);
4811
4812 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004813
Auke Kok9d5c8242008-01-24 02:22:38 -08004814 return IRQ_HANDLED;
4815}
4816
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004817#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004818static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004819{
Alexander Duyck047e0032009-10-27 15:49:27 +00004820 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004821 struct e1000_hw *hw = &adapter->hw;
4822 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004823
Alexander Duyck047e0032009-10-27 15:49:27 +00004824 if (q_vector->cpu == cpu)
4825 goto out_no_update;
4826
Alexander Duyck0ba82992011-08-26 07:45:47 +00004827 if (q_vector->tx.ring) {
4828 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004829 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4830 if (hw->mac.type == e1000_82575) {
4831 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4832 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4833 } else {
4834 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4835 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4836 E1000_DCA_TXCTRL_CPUID_SHIFT;
4837 }
4838 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4839 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4840 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004841 if (q_vector->rx.ring) {
4842 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004843 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4844 if (hw->mac.type == e1000_82575) {
4845 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4846 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4847 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004848 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004849 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004850 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004851 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004852 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4853 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4854 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4855 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004856 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004857 q_vector->cpu = cpu;
4858out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004859 put_cpu();
4860}
4861
4862static void igb_setup_dca(struct igb_adapter *adapter)
4863{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004864 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004865 int i;
4866
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004867 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004868 return;
4869
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004870 /* Always use CB2 mode, difference is masked in the CB driver. */
4871 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4872
Alexander Duyck047e0032009-10-27 15:49:27 +00004873 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004874 adapter->q_vector[i]->cpu = -1;
4875 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004876 }
4877}
4878
4879static int __igb_notify_dca(struct device *dev, void *data)
4880{
4881 struct net_device *netdev = dev_get_drvdata(dev);
4882 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004883 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004884 struct e1000_hw *hw = &adapter->hw;
4885 unsigned long event = *(unsigned long *)data;
4886
4887 switch (event) {
4888 case DCA_PROVIDER_ADD:
4889 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004890 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004891 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004892 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004893 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004894 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004895 igb_setup_dca(adapter);
4896 break;
4897 }
4898 /* Fall Through since DCA is disabled. */
4899 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004900 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004901 /* without this, a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004902 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004903 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004904 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004905 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004906 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004907 }
4908 break;
4909 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004910
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004911 return 0;
4912}
4913
4914static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4915 void *p)
4916{
4917 int ret_val;
4918
4919 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4920 __igb_notify_dca);
4921
4922 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4923}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004924#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004925
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004926static void igb_ping_all_vfs(struct igb_adapter *adapter)
4927{
4928 struct e1000_hw *hw = &adapter->hw;
4929 u32 ping;
4930 int i;
4931
4932 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4933 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004934 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004935 ping |= E1000_VT_MSGTYPE_CTS;
4936 igb_write_mbx(hw, &ping, 1, i);
4937 }
4938}
4939
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004940static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4941{
4942 struct e1000_hw *hw = &adapter->hw;
4943 u32 vmolr = rd32(E1000_VMOLR(vf));
4944 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4945
Alexander Duyckd85b90042010-09-22 17:56:20 +00004946 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004947 IGB_VF_FLAG_MULTI_PROMISC);
4948 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4949
4950 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4951 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00004952 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004953 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4954 } else {
4955 /*
4956 * if we have hashes and we are clearing a multicast promisc
4957 * flag, we need to write the hashes to the MTA, as this step
4958 * was previously skipped
4959 */
4960 if (vf_data->num_vf_mc_hashes > 30) {
4961 vmolr |= E1000_VMOLR_MPME;
4962 } else if (vf_data->num_vf_mc_hashes) {
4963 int j;
4964 vmolr |= E1000_VMOLR_ROMPE;
4965 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4966 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4967 }
4968 }
4969
4970 wr32(E1000_VMOLR(vf), vmolr);
4971
4972 /* there are flags left unprocessed, likely not supported */
4973 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4974 return -EINVAL;
4975
4976 return 0;
4977
4978}
4979
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004980static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4981 u32 *msgbuf, u32 vf)
4982{
4983 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4984 u16 *hash_list = (u16 *)&msgbuf[1];
4985 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4986 int i;
4987
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004988 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004989 * to this VF for later use, to restore when the PF multicast
4990 * list changes
4991 */
4992 vf_data->num_vf_mc_hashes = n;
4993
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004994 /* only up to 30 hash values supported */
4995 if (n > 30)
4996 n = 30;
4997
4998 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004999 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005000 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005001
5002 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005003 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005004
5005 return 0;
5006}
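
/* Illustrative: a VF registering 45 multicast addresses records
 * num_vf_mc_hashes = 45 but stores only the first 30 hashes; because the
 * count exceeds 30, igb_restore_vf_multicasts() and igb_set_vf_promisc()
 * fall back to E1000_VMOLR_MPME instead of the truncated hash list. */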
5007
5008static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5009{
5010 struct e1000_hw *hw = &adapter->hw;
5011 struct vf_data_storage *vf_data;
5012 int i, j;
5013
5014 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005015 u32 vmolr = rd32(E1000_VMOLR(i));
5016 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5017
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005018 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005019
5020 if ((vf_data->num_vf_mc_hashes > 30) ||
5021 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5022 vmolr |= E1000_VMOLR_MPME;
5023 } else if (vf_data->num_vf_mc_hashes) {
5024 vmolr |= E1000_VMOLR_ROMPE;
5025 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5026 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5027 }
5028 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005029 }
5030}
5031
5032static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5033{
5034 struct e1000_hw *hw = &adapter->hw;
5035 u32 pool_mask, reg, vid;
5036 int i;
5037
5038 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5039
5040 /* Find the vlan filter for this id */
5041 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5042 reg = rd32(E1000_VLVF(i));
5043
5044 /* remove the vf from the pool */
5045 reg &= ~pool_mask;
5046
5047 /* if pool is empty then remove entry from vfta */
5048 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5049 (reg & E1000_VLVF_VLANID_ENABLE)) {
5050 vid = reg & E1000_VLVF_VLANID_MASK;
5051 reg = 0;
5052 igb_vfta_set(hw, vid, false);
5053 }
5054
5055 wr32(E1000_VLVF(i), reg);
5056 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005057
5058 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005059}
5060
5061static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5062{
5063 struct e1000_hw *hw = &adapter->hw;
5064 u32 reg, i;
5065
Alexander Duyck51466232009-10-27 23:47:35 +00005066 /* The vlvf table only exists on 82576 hardware and newer */
5067 if (hw->mac.type < e1000_82576)
5068 return -1;
5069
5070 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005071 if (!adapter->vfs_allocated_count)
5072 return -1;
5073
5074 /* Find the vlan filter for this id */
5075 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5076 reg = rd32(E1000_VLVF(i));
5077 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5078 vid == (reg & E1000_VLVF_VLANID_MASK))
5079 break;
5080 }
5081
5082 if (add) {
5083 if (i == E1000_VLVF_ARRAY_SIZE) {
5084 /* Did not find a matching VLAN ID entry that was
5085 * enabled. Search for a free filter entry, i.e.
5086 * one without the enable bit set
5087 */
5088 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5089 reg = rd32(E1000_VLVF(i));
5090 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5091 break;
5092 }
5093 }
5094 if (i < E1000_VLVF_ARRAY_SIZE) {
5095 /* Found an enabled/available entry */
5096 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5097
5098 /* if !enabled we need to set this up in vfta */
5099 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005100 /* add VID to filter table */
5101 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005102 reg |= E1000_VLVF_VLANID_ENABLE;
5103 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005104 reg &= ~E1000_VLVF_VLANID_MASK;
5105 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005106 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005107
5108 /* do not modify RLPML for PF devices */
5109 if (vf >= adapter->vfs_allocated_count)
5110 return 0;
5111
5112 if (!adapter->vf_data[vf].vlans_enabled) {
5113 u32 size;
5114 reg = rd32(E1000_VMOLR(vf));
5115 size = reg & E1000_VMOLR_RLPML_MASK;
5116 size += 4;
5117 reg &= ~E1000_VMOLR_RLPML_MASK;
5118 reg |= size;
5119 wr32(E1000_VMOLR(vf), reg);
5120 }
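			/* the size += 4 above widens the VF's RLPML maximum
			 * frame size to make room for an 802.1Q tag the first
			 * time a VLAN is enabled; the removal path below
			 * shrinks it back when the last VLAN is cleared */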
Alexander Duyckae641bd2009-09-03 14:49:33 +00005121
Alexander Duyck51466232009-10-27 23:47:35 +00005122 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005123 return 0;
5124 }
5125 } else {
5126 if (i < E1000_VLVF_ARRAY_SIZE) {
5127 /* remove vf from the pool */
5128 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5129 /* if pool is empty then remove entry from vfta */
5130 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5131 reg = 0;
5132 igb_vfta_set(hw, vid, false);
5133 }
5134 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005135
5136 /* do not modify RLPML for PF devices */
5137 if (vf >= adapter->vfs_allocated_count)
5138 return 0;
5139
5140 adapter->vf_data[vf].vlans_enabled--;
5141 if (!adapter->vf_data[vf].vlans_enabled) {
5142 u32 size;
5143 reg = rd32(E1000_VMOLR(vf));
5144 size = reg & E1000_VMOLR_RLPML_MASK;
5145 size -= 4;
5146 reg &= ~E1000_VMOLR_RLPML_MASK;
5147 reg |= size;
5148 wr32(E1000_VMOLR(vf), reg);
5149 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005150 }
5151 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005152 return 0;
5153}
5154
5155static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5156{
5157 struct e1000_hw *hw = &adapter->hw;
5158
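	/* VMVIR programs the default (port) VLAN the hardware inserts
	 * into untagged frames transmitted by this VF
	 */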
5159 if (vid)
5160 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5161 else
5162 wr32(E1000_VMVIR(vf), 0);
5163}
5164
5165static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5166 int vf, u16 vlan, u8 qos)
5167{
5168 int err = 0;
5169 struct igb_adapter *adapter = netdev_priv(netdev);
5170
5171 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5172 return -EINVAL;
5173 if (vlan || qos) {
5174 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5175 if (err)
5176 goto out;
5177 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5178 igb_set_vmolr(adapter, vf, !vlan);
5179 adapter->vf_data[vf].pf_vlan = vlan;
5180 adapter->vf_data[vf].pf_qos = qos;
5181 dev_info(&adapter->pdev->dev,
5182 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5183 if (test_bit(__IGB_DOWN, &adapter->state)) {
5184 dev_warn(&adapter->pdev->dev,
5185 "The VF VLAN has been set,"
5186 " but the PF device is not up.\n");
5187 dev_warn(&adapter->pdev->dev,
5188 "Bring the PF device up before"
5189 " attempting to use the VF device.\n");
5190 }
5191 } else {
5192 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5193 false, vf);
5194 igb_set_vmvir(adapter, vlan, vf);
5195 igb_set_vmolr(adapter, vf, true);
5196 adapter->vf_data[vf].pf_vlan = 0;
5197 adapter->vf_data[vf].pf_qos = 0;
5198 }
5199out:
5200 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005201}
5202
5203static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5204{
5205 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5206 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5207
5208 return igb_vlvf_set(adapter, vid, add, vf);
5209}
5210
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005211static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005212{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005213 /* clear flags - except flag that indicates PF has set the MAC */
5214 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005215 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005216
5217 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005218 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005219
5220 /* reset vlans for device */
5221 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005222 if (adapter->vf_data[vf].pf_vlan)
5223 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5224 adapter->vf_data[vf].pf_vlan,
5225 adapter->vf_data[vf].pf_qos);
5226 else
5227 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005228
5229 /* reset multicast table array for vf */
5230 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5231
5232 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005233 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005234}
5235
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005236static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5237{
5238 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5239
5240 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005241 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5242 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005243
5244 /* process remaining reset events */
5245 igb_vf_reset(adapter, vf);
5246}
5247
5248static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005249{
5250 struct e1000_hw *hw = &adapter->hw;
5251 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
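	/* VF MAC addresses occupy receive address (RAR) entries from the
	 * top of the table downward, one per VF
	 */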
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005252 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005253 u32 reg, msgbuf[3];
5254 u8 *addr = (u8 *)(&msgbuf[1]);
5255
5256 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005257 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005258
5259 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005260 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005261
5262 /* enable transmit and receive for vf */
5263 reg = rd32(E1000_VFTE);
5264 wr32(E1000_VFTE, reg | (1 << vf));
5265 reg = rd32(E1000_VFRE);
5266 wr32(E1000_VFRE, reg | (1 << vf));
5267
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005268 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005269
5270 /* reply to reset with ack and vf mac address */
5271 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5272 memcpy(addr, vf_mac, 6);
5273 igb_write_mbx(hw, msgbuf, 3, vf);
5274}
5275
5276static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5277{
Greg Rosede42edd2010-07-01 13:39:23 +00005278 /*
5279 * The VF MAC Address is stored in a packed array of bytes
5280 * starting at the second 32 bit word of the msg array
5281 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005282 unsigned char *addr = (char *)&msg[1];
5283 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005284
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005285 if (is_valid_ether_addr(addr))
5286 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005287
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005288 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005289}
5290
5291static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5292{
5293 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005294 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005295 u32 msg = E1000_VT_MSGTYPE_NACK;
5296
5297 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005298 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5299 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005300 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005301 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005302 }
5303}
5304
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005305static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005306{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005307 struct pci_dev *pdev = adapter->pdev;
5308 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005309 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005310 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005311 s32 retval;
5312
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005313 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005314
Alexander Duyckfef45f42009-12-11 22:57:34 -08005315 if (retval) {
5316 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005317 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005318 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5319 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5320 return;
5321 goto out;
5322 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005323
5324 /* this is a message we already processed, do nothing */
5325 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005326 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005327
5328 /*
5329 * until the vf completes a reset it should not be
5330 * allowed to start any configuration.
5331 */
5332
5333 if (msgbuf[0] == E1000_VF_RESET) {
5334 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005335 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005336 }
5337
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005338 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005339 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5340 return;
5341 retval = -1;
5342 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005343 }
5344
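	/* the low 16 bits of the first mailbox word carry the VF opcode */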
5345 switch ((msgbuf[0] & 0xFFFF)) {
5346 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005347 retval = -EINVAL;
5348 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5349 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5350 else
5351 dev_warn(&pdev->dev,
5352 "VF %d attempted to override administratively "
5353 "set MAC address\nReload the VF driver to "
5354 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005355 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005356 case E1000_VF_SET_PROMISC:
5357 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5358 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005359 case E1000_VF_SET_MULTICAST:
5360 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5361 break;
5362 case E1000_VF_SET_LPE:
5363 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5364 break;
5365 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005366 retval = -1;
5367 if (vf_data->pf_vlan)
5368 dev_warn(&pdev->dev,
5369 "VF %d attempted to override administratively "
5370 "set VLAN tag\nReload the VF driver to "
5371 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005372 else
5373 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005374 break;
5375 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005376 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005377 retval = -1;
5378 break;
5379 }
5380
Alexander Duyckfef45f42009-12-11 22:57:34 -08005381 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5382out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005383 /* notify the VF of the results of what it sent us */
5384 if (retval)
5385 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5386 else
5387 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5388
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005389 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005390}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005391
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005392static void igb_msg_task(struct igb_adapter *adapter)
5393{
5394 struct e1000_hw *hw = &adapter->hw;
5395 u32 vf;
5396
5397 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5398 /* process any reset requests */
5399 if (!igb_check_for_rst(hw, vf))
5400 igb_vf_reset_event(adapter, vf);
5401
5402 /* process any messages pending */
5403 if (!igb_check_for_msg(hw, vf))
5404 igb_rcv_msg_from_vf(adapter, vf);
5405
5406 /* process any acks */
5407 if (!igb_check_for_ack(hw, vf))
5408 igb_rcv_ack_from_vf(adapter, vf);
5409 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005410}
5411
Auke Kok9d5c8242008-01-24 02:22:38 -08005412/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005413 * igb_set_uta - Set unicast filter table address
5414 * @adapter: board private structure
5415 *
5416 * The unicast table address is a register array of 32-bit registers.
 5417 * The table is meant to be used in a way similar to how the MTA is used;
 5418 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005419 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5420 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005421 **/
5422static void igb_set_uta(struct igb_adapter *adapter)
5423{
5424 struct e1000_hw *hw = &adapter->hw;
5425 int i;
5426
5427 /* The UTA table only exists on 82576 hardware and newer */
5428 if (hw->mac.type < e1000_82576)
5429 return;
5430
5431 /* we only need to do this if VMDq is enabled */
5432 if (!adapter->vfs_allocated_count)
5433 return;
5434
5435 for (i = 0; i < hw->mac.uta_reg_count; i++)
5436 array_wr32(E1000_UTA, i, ~0);
5437}
5438
5439/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005440 * igb_intr_msi - Interrupt Handler
5441 * @irq: interrupt number
5442 * @data: pointer to a network interface device structure
5443 **/
5444static irqreturn_t igb_intr_msi(int irq, void *data)
5445{
Alexander Duyck047e0032009-10-27 15:49:27 +00005446 struct igb_adapter *adapter = data;
5447 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005448 struct e1000_hw *hw = &adapter->hw;
5449 /* read ICR disables interrupts using IAM */
5450 u32 icr = rd32(E1000_ICR);
5451
Alexander Duyck047e0032009-10-27 15:49:27 +00005452 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005453
Alexander Duyck7f081d42010-01-07 17:41:00 +00005454 if (icr & E1000_ICR_DRSTA)
5455 schedule_work(&adapter->reset_task);
5456
Alexander Duyck047e0032009-10-27 15:49:27 +00005457 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005458 /* HW is reporting DMA is out of sync */
5459 adapter->stats.doosync++;
5460 }
5461
Auke Kok9d5c8242008-01-24 02:22:38 -08005462 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5463 hw->mac.get_link_status = 1;
5464 if (!test_bit(__IGB_DOWN, &adapter->state))
5465 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5466 }
5467
Alexander Duyck047e0032009-10-27 15:49:27 +00005468 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005469
5470 return IRQ_HANDLED;
5471}
5472
5473/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005474 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005475 * @irq: interrupt number
5476 * @data: pointer to a network interface device structure
5477 **/
5478static irqreturn_t igb_intr(int irq, void *data)
5479{
Alexander Duyck047e0032009-10-27 15:49:27 +00005480 struct igb_adapter *adapter = data;
5481 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005482 struct e1000_hw *hw = &adapter->hw;
5483 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5484 * need for the IMC write */
5485 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005486
5487 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5488 * not set, then the adapter didn't send an interrupt */
5489 if (!(icr & E1000_ICR_INT_ASSERTED))
5490 return IRQ_NONE;
5491
Alexander Duyck0ba82992011-08-26 07:45:47 +00005492 igb_write_itr(q_vector);
5493
Alexander Duyck7f081d42010-01-07 17:41:00 +00005494 if (icr & E1000_ICR_DRSTA)
5495 schedule_work(&adapter->reset_task);
5496
Alexander Duyck047e0032009-10-27 15:49:27 +00005497 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005498 /* HW is reporting DMA is out of sync */
5499 adapter->stats.doosync++;
5500 }
5501
Auke Kok9d5c8242008-01-24 02:22:38 -08005502 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5503 hw->mac.get_link_status = 1;
5504 /* guard against interrupt when we're going down */
5505 if (!test_bit(__IGB_DOWN, &adapter->state))
5506 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5507 }
5508
Alexander Duyck047e0032009-10-27 15:49:27 +00005509 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005510
5511 return IRQ_HANDLED;
5512}
5513
Alexander Duyck0ba82992011-08-26 07:45:47 +00005514void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005515{
Alexander Duyck047e0032009-10-27 15:49:27 +00005516 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005517 struct e1000_hw *hw = &adapter->hw;
5518
Alexander Duyck0ba82992011-08-26 07:45:47 +00005519 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5520 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5521 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5522 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005523 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005524 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005525 }
5526
5527 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5528 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005529 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005530 else
5531 igb_irq_enable(adapter);
5532 }
5533}
5534
Auke Kok9d5c8242008-01-24 02:22:38 -08005535/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005536 * igb_poll - NAPI Rx polling callback
5537 * @napi: napi polling structure
5538 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005539 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005540static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005541{
Alexander Duyck047e0032009-10-27 15:49:27 +00005542 struct igb_q_vector *q_vector = container_of(napi,
5543 struct igb_q_vector,
5544 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005545 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005546
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005547#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005548 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5549 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005550#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005551 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005552 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005553
Alexander Duyck0ba82992011-08-26 07:45:47 +00005554 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005555 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005556
Alexander Duyck16eb8812011-08-26 07:43:54 +00005557 /* If all work not completed, return budget and keep polling */
5558 if (!clean_complete)
5559 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005560
Alexander Duyck46544252009-02-19 20:39:04 -08005561	/* all work completed, exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005562 napi_complete(napi);
5563 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005564
Alexander Duyck16eb8812011-08-26 07:43:54 +00005565 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005566}
Al Viro6d8126f2008-03-16 22:23:24 +00005567
Auke Kok9d5c8242008-01-24 02:22:38 -08005568/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005569 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005570 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005571 * @shhwtstamps: timestamp structure to update
5572 * @regval: unsigned 64bit system time value.
5573 *
5574 * We need to convert the system time value stored in the RX/TXSTMP registers
5575 * into a hwtstamp which can be used by the upper level timestamping functions
5576 */
5577static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5578 struct skb_shared_hwtstamps *shhwtstamps,
5579 u64 regval)
5580{
5581 u64 ns;
5582
Alexander Duyck55cac242009-11-19 12:42:21 +00005583 /*
5584 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5585 * 24 to match clock shift we setup earlier.
5586 */
5587 if (adapter->hw.mac.type == e1000_82580)
5588 regval <<= IGB_82580_TSYNC_SHIFT;
5589
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005590 ns = timecounter_cyc2time(&adapter->clock, regval);
5591 timecompare_update(&adapter->compare, ns);
5592 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5593 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5594 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5595}
5596
5597/**
5598 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5599 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck06034642011-08-26 07:44:22 +00005600 * @buffer_info: pointer to igb_tx_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005601 *
5602 * If we were asked to do hardware stamping and such a time stamp is
 5603 * available, then it must have been for this skb here because we
 5604 * allow only one such packet into the queue.
5605 */
Alexander Duyck06034642011-08-26 07:44:22 +00005606static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5607 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005608{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005609 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005610 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005611 struct skb_shared_hwtstamps shhwtstamps;
5612 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005613
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005614 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005615 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005616 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5617 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005618
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005619 regval = rd32(E1000_TXSTMPL);
5620 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5621
5622 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005623 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005624}
5625
5626/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005627 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005628 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005629 * returns true if ring is completely cleaned
5630 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005631static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005632{
Alexander Duyck047e0032009-10-27 15:49:27 +00005633 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005634 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005635 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005636 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005637 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005638 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005639 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005640
Alexander Duyck13fde972011-10-05 13:35:24 +00005641 if (test_bit(__IGB_DOWN, &adapter->state))
5642 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005643
Alexander Duyck06034642011-08-26 07:44:22 +00005644 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005645 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005646 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005647
Alexander Duyck13fde972011-10-05 13:35:24 +00005648 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005649 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005650
Alexander Duyck8542db02011-08-26 07:44:43 +00005651 /* prevent any other reads prior to eop_desc */
5652 rmb();
5653
5654 /* if next_to_watch is not set then there is no work pending */
5655 if (!eop_desc)
5656 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005657
5658 /* if DD is not set pending work has not been completed */
5659 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5660 break;
5661
Alexander Duyck8542db02011-08-26 07:44:43 +00005662 /* clear next_to_watch to prevent false hangs */
5663 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005664
Alexander Duyckebe42d12011-08-26 07:45:09 +00005665 /* update the statistics for this packet */
5666 total_bytes += tx_buffer->bytecount;
5667 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005668
Alexander Duyckebe42d12011-08-26 07:45:09 +00005669 /* retrieve hardware timestamp */
5670 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005671
Alexander Duyckebe42d12011-08-26 07:45:09 +00005672 /* free the skb */
5673 dev_kfree_skb_any(tx_buffer->skb);
5674 tx_buffer->skb = NULL;
5675
5676 /* unmap skb header data */
5677 dma_unmap_single(tx_ring->dev,
5678 tx_buffer->dma,
5679 tx_buffer->length,
5680 DMA_TO_DEVICE);
5681
5682 /* clear last DMA location and unmap remaining buffers */
5683 while (tx_desc != eop_desc) {
5684 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005685
Alexander Duyck13fde972011-10-05 13:35:24 +00005686 tx_buffer++;
5687 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005688 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005689 if (unlikely(!i)) {
5690 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005691 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005692 tx_desc = IGB_TX_DESC(tx_ring, 0);
5693 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005694
5695 /* unmap any remaining paged data */
5696 if (tx_buffer->dma) {
5697 dma_unmap_page(tx_ring->dev,
5698 tx_buffer->dma,
5699 tx_buffer->length,
5700 DMA_TO_DEVICE);
5701 }
5702 }
5703
5704 /* clear last DMA location */
5705 tx_buffer->dma = 0;
5706
5707 /* move us one more past the eop_desc for start of next pkt */
5708 tx_buffer++;
5709 tx_desc++;
5710 i++;
5711 if (unlikely(!i)) {
5712 i -= tx_ring->count;
5713 tx_buffer = tx_ring->tx_buffer_info;
5714 tx_desc = IGB_TX_DESC(tx_ring, 0);
5715 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005716 }
5717
Alexander Duyck8542db02011-08-26 07:44:43 +00005718 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005719 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005720 u64_stats_update_begin(&tx_ring->tx_syncp);
5721 tx_ring->tx_stats.bytes += total_bytes;
5722 tx_ring->tx_stats.packets += total_packets;
5723 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005724 q_vector->tx.total_bytes += total_bytes;
5725 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005726
5727 if (tx_ring->detect_tx_hung) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005728 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005729
Alexander Duyck8542db02011-08-26 07:44:43 +00005730 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005731
Auke Kok9d5c8242008-01-24 02:22:38 -08005732		/* Detect a transmit hang in hardware; this serializes the
5733 * check with the clearing of time_stamp and movement of i */
5734 tx_ring->detect_tx_hung = false;
Alexander Duyck8542db02011-08-26 07:44:43 +00005735 if (eop_desc &&
5736 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005737 (adapter->tx_timeout_factor * HZ)) &&
5738 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005739
Auke Kok9d5c8242008-01-24 02:22:38 -08005740 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005741 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005742 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005743 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005744 " TDH <%x>\n"
5745 " TDT <%x>\n"
5746 " next_to_use <%x>\n"
5747 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005748 "buffer_info[next_to_clean]\n"
5749 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005750 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005751 " jiffies <%lx>\n"
5752 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005753 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005754 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005755 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005756 tx_ring->next_to_use,
5757 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005758 tx_buffer->time_stamp,
5759 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005760 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005761 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005762 netif_stop_subqueue(tx_ring->netdev,
5763 tx_ring->queue_index);
5764
5765 /* we are about to reset, no point in enabling stuff */
5766 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005767 }
5768 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005769
5770 if (unlikely(total_packets &&
5771 netif_carrier_ok(tx_ring->netdev) &&
5772 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5773 /* Make sure that anybody stopping the queue after this
5774 * sees the new next_to_clean.
5775 */
5776 smp_mb();
5777 if (__netif_subqueue_stopped(tx_ring->netdev,
5778 tx_ring->queue_index) &&
5779 !(test_bit(__IGB_DOWN, &adapter->state))) {
5780 netif_wake_subqueue(tx_ring->netdev,
5781 tx_ring->queue_index);
5782
5783 u64_stats_update_begin(&tx_ring->tx_syncp);
5784 tx_ring->tx_stats.restart_queue++;
5785 u64_stats_update_end(&tx_ring->tx_syncp);
5786 }
5787 }
5788
5789 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005790}
5791
Alexander Duyckcd392f52011-08-26 07:43:59 +00005792static inline void igb_rx_checksum(struct igb_ring *ring,
5793 u32 status_err, struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005794{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005795 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005796
Alexander Duyck294e7d72011-08-26 07:45:57 +00005797 /* Ignore Checksum bit is set */
5798 if (status_err & E1000_RXD_STAT_IXSM)
5799 return;
5800
5801 /* Rx checksum disabled via ethtool */
5802 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005803 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005804
Auke Kok9d5c8242008-01-24 02:22:38 -08005805 /* TCP/UDP checksum error bit is set */
5806 if (status_err &
5807 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005808 /*
5809 * work around errata with sctp packets where the TCPE aka
5810 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5811 * packets, (aka let the stack check the crc32c)
5812 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005813 if (!((skb->len == 60) &&
5814 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005815 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005816 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005817 u64_stats_update_end(&ring->rx_syncp);
5818 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005819 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005820 return;
5821 }
5822 /* It must be a TCP or UDP packet with a valid checksum */
5823 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5824 skb->ip_summed = CHECKSUM_UNNECESSARY;
5825
Alexander Duyck59d71982010-04-27 13:09:25 +00005826 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005827}
5828
Nick Nunley757b77e2010-03-26 11:36:47 +00005829static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005830 struct sk_buff *skb)
5831{
5832 struct igb_adapter *adapter = q_vector->adapter;
5833 struct e1000_hw *hw = &adapter->hw;
5834 u64 regval;
5835
5836 /*
5837 * If this bit is set, then the RX registers contain the time stamp. No
5838 * other packet will be time stamped until we read these registers, so
5839 * read the registers to make them available again. Because only one
5840 * packet can be time stamped at a time, we know that the register
5841 * values must belong to this one here and therefore we don't need to
5842 * compare any of the additional attributes stored for it.
5843 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005844 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005845 * can turn into a skb_shared_hwtstamps.
5846 */
Nick Nunley757b77e2010-03-26 11:36:47 +00005847 if (staterr & E1000_RXDADV_STAT_TSIP) {
5848 u32 *stamp = (u32 *)skb->data;
5849 regval = le32_to_cpu(*(stamp + 2));
5850 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5851 skb_pull(skb, IGB_TS_HDR_LEN);
5852 } else {
 5853		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5854 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005855
Nick Nunley757b77e2010-03-26 11:36:47 +00005856 regval = rd32(E1000_RXSTMPL);
5857 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5858 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005859
5860 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5861}
Alexander Duyck44390ca2011-08-26 07:43:38 +00005862static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005863{
5864 /* HW will not DMA in data larger than the given buffer, even if it
5865 * parses the (NFS, of course) header to be larger. In that case, it
5866 * fills the header buffer and spills the rest into the page.
5867 */
5868 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5869 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00005870 if (hlen > IGB_RX_HDR_LEN)
5871 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005872 return hlen;
5873}
5874
Alexander Duyckcd392f52011-08-26 07:43:59 +00005875static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005876{
Alexander Duyck0ba82992011-08-26 07:45:47 +00005877 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005878 union e1000_adv_rx_desc *rx_desc;
5879 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005880 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005881 u32 staterr;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005882 u16 cleaned_count = igb_desc_unused(rx_ring);
5883 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005884
Alexander Duyck601369062011-08-26 07:44:05 +00005885 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005886 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5887
5888 while (staterr & E1000_RXD_STAT_DD) {
Alexander Duyck06034642011-08-26 07:44:22 +00005889 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00005890 struct sk_buff *skb = buffer_info->skb;
5891 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005892
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005893 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005894 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005895
5896 i++;
5897 if (i == rx_ring->count)
5898 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005899
Alexander Duyck601369062011-08-26 07:44:05 +00005900 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005901 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005902
Alexander Duyck16eb8812011-08-26 07:43:54 +00005903 /*
5904 * This memory barrier is needed to keep us from reading
5905 * any other fields out of the rx_desc until we know the
5906 * RXD_STAT_DD bit is set
5907 */
5908 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005909
Alexander Duyck16eb8812011-08-26 07:43:54 +00005910 if (!skb_is_nonlinear(skb)) {
5911 __skb_put(skb, igb_get_hlen(rx_desc));
5912 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00005913 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00005914 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00005915 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005916 }
5917
Alexander Duyck16eb8812011-08-26 07:43:54 +00005918 if (rx_desc->wb.upper.length) {
5919 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005920
Koki Sanagiaa913402010-04-27 01:01:19 +00005921 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005922 buffer_info->page,
5923 buffer_info->page_offset,
5924 length);
5925
Alexander Duyck16eb8812011-08-26 07:43:54 +00005926 skb->len += length;
5927 skb->data_len += length;
5928 skb->truesize += length;
5929
Alexander Duyckd1eff352009-11-12 18:38:35 +00005930 if ((page_count(buffer_info->page) != 1) ||
5931 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005932 buffer_info->page = NULL;
5933 else
5934 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08005935
Alexander Duyck16eb8812011-08-26 07:43:54 +00005936 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
5937 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5938 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005939 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005940
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005941 if (!(staterr & E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00005942 struct igb_rx_buffer *next_buffer;
5943 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08005944 buffer_info->skb = next_buffer->skb;
5945 buffer_info->dma = next_buffer->dma;
5946 next_buffer->skb = skb;
5947 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005948 goto next_desc;
5949 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00005950
Auke Kok9d5c8242008-01-24 02:22:38 -08005951 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00005952 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005953 goto next_desc;
5954 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005955
Nick Nunley757b77e2010-03-26 11:36:47 +00005956 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5957 igb_rx_hwtstamp(q_vector, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005958 total_bytes += skb->len;
5959 total_packets++;
5960
Alexander Duyckcd392f52011-08-26 07:43:59 +00005961 igb_rx_checksum(rx_ring, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005962
Alexander Duyck16eb8812011-08-26 07:43:54 +00005963 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08005964
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00005965 if (staterr & E1000_RXD_STAT_VP) {
5966 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
Alexander Duyck047e0032009-10-27 15:49:27 +00005967
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00005968 __vlan_hwaccel_put_tag(skb, vid);
5969 }
5970 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005971
Alexander Duyck16eb8812011-08-26 07:43:54 +00005972 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08005973next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00005974 if (!budget)
5975 break;
5976
5977 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005978 /* return some buffers to hardware, one at a time is too slow */
5979 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00005980 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005981 cleaned_count = 0;
5982 }
5983
5984 /* use prefetched values */
5985 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08005986 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5987 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005988
Auke Kok9d5c8242008-01-24 02:22:38 -08005989 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005990 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08005991 rx_ring->rx_stats.packets += total_packets;
5992 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005993 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005994 q_vector->rx.total_packets += total_packets;
5995 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00005996
5997 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005998 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00005999
Alexander Duyck16eb8812011-08-26 07:43:54 +00006000 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006001}
6002
Alexander Duyckc023cd82011-08-26 07:43:43 +00006003static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006004 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006005{
6006 struct sk_buff *skb = bi->skb;
6007 dma_addr_t dma = bi->dma;
6008
6009 if (dma)
6010 return true;
6011
6012 if (likely(!skb)) {
6013 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6014 IGB_RX_HDR_LEN);
6015 bi->skb = skb;
6016 if (!skb) {
6017 rx_ring->rx_stats.alloc_failed++;
6018 return false;
6019 }
6020
6021 /* initialize skb for ring */
6022 skb_record_rx_queue(skb, rx_ring->queue_index);
6023 }
6024
6025 dma = dma_map_single(rx_ring->dev, skb->data,
6026 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6027
6028 if (dma_mapping_error(rx_ring->dev, dma)) {
6029 rx_ring->rx_stats.alloc_failed++;
6030 return false;
6031 }
6032
6033 bi->dma = dma;
6034 return true;
6035}
6036
6037static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006038 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006039{
6040 struct page *page = bi->page;
6041 dma_addr_t page_dma = bi->page_dma;
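	/* flip to the other half of the page so the half most recently
	 * handed to the stack is not reused immediately
	 */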
6042 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6043
6044 if (page_dma)
6045 return true;
6046
6047 if (!page) {
6048 page = netdev_alloc_page(rx_ring->netdev);
6049 bi->page = page;
6050 if (unlikely(!page)) {
6051 rx_ring->rx_stats.alloc_failed++;
6052 return false;
6053 }
6054 }
6055
6056 page_dma = dma_map_page(rx_ring->dev, page,
6057 page_offset, PAGE_SIZE / 2,
6058 DMA_FROM_DEVICE);
6059
6060 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6061 rx_ring->rx_stats.alloc_failed++;
6062 return false;
6063 }
6064
6065 bi->page_dma = page_dma;
6066 bi->page_offset = page_offset;
6067 return true;
6068}
6069
Auke Kok9d5c8242008-01-24 02:22:38 -08006070/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006071 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08006072 * @rx_ring: rx descriptor ring to refill
 * @cleaned_count: number of buffers to allocate and map
6073 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006074void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006075{
Auke Kok9d5c8242008-01-24 02:22:38 -08006076 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006077 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006078 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006079
Alexander Duyck601369062011-08-26 07:44:05 +00006080 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006081 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006082 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006083
6084 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006085 if (!igb_alloc_mapped_skb(rx_ring, bi))
6086 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006087
Alexander Duyckc023cd82011-08-26 07:43:43 +00006088 /* Refresh the desc even if buffer_addrs didn't change
6089 * because each write-back erases this info. */
6090 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006091
Alexander Duyckc023cd82011-08-26 07:43:43 +00006092 if (!igb_alloc_mapped_page(rx_ring, bi))
6093 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006094
Alexander Duyckc023cd82011-08-26 07:43:43 +00006095 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006096
Alexander Duyckc023cd82011-08-26 07:43:43 +00006097 rx_desc++;
6098 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006099 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006100 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006101 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006102 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006103 i -= rx_ring->count;
6104 }
6105
6106 /* clear the hdr_addr for the next_to_use descriptor */
6107 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006108 }
6109
Alexander Duyckc023cd82011-08-26 07:43:43 +00006110 i += rx_ring->count;
6111
Auke Kok9d5c8242008-01-24 02:22:38 -08006112 if (rx_ring->next_to_use != i) {
6113 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006114
6115 /* Force memory writes to complete before letting h/w
6116 * know there are new descriptors to fetch. (Only
6117 * applicable for weak-ordered memory model archs,
6118 * such as IA-64). */
6119 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006120 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006121 }
6122}
6123
6124/**
 6125 * igb_mii_ioctl - handle MII register ioctls
 6126 * @netdev: network interface device structure
 6127 * @ifr: pointer to the ifreq carrying the MII data
 6128 * @cmd: ioctl command (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
6129 **/
6130static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6131{
6132 struct igb_adapter *adapter = netdev_priv(netdev);
6133 struct mii_ioctl_data *data = if_mii(ifr);
6134
6135 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6136 return -EOPNOTSUPP;
6137
6138 switch (cmd) {
6139 case SIOCGMIIPHY:
6140 data->phy_id = adapter->hw.phy.addr;
6141 break;
6142 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006143 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6144 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006145 return -EIO;
6146 break;
6147 case SIOCSMIIREG:
6148 default:
6149 return -EOPNOTSUPP;
6150 }
6151 return 0;
6152}
6153
6154/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006155 * igb_hwtstamp_ioctl - control hardware time stamping
 6156 * @netdev: network interface device structure
 6157 * @ifr: pointer to the ifreq carrying a hwtstamp_config
 6158 * @cmd: ioctl command (SIOCSHWTSTAMP)
6159 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006160 * Outgoing time stamping can be enabled and disabled. Play nice and
 6161 * disable it when requested, although it shouldn't cause any overhead
6162 * when no packet needs it. At most one packet in the queue may be
6163 * marked for time stamping, otherwise it would be impossible to tell
6164 * for sure to which packet the hardware time stamp belongs.
6165 *
6166 * Incoming time stamping has to be configured via the hardware
6167 * filters. Not all combinations are supported, in particular event
6168 * type has to be specified. Matching the kind of event packet is
6169 * not supported, with the exception of "all V2 events regardless of
6170 * level 2 or 4".
6171 *
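 * A minimal user-space sketch (the socket fd and the "eth0" name are
 * illustrative assumptions, not part of this driver):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *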
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006172 **/
6173static int igb_hwtstamp_ioctl(struct net_device *netdev,
6174 struct ifreq *ifr, int cmd)
6175{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006176 struct igb_adapter *adapter = netdev_priv(netdev);
6177 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006178 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006179 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6180 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006181 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006182 bool is_l4 = false;
6183 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006184 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006185
6186 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6187 return -EFAULT;
6188
6189 /* reserved for future extensions */
6190 if (config.flags)
6191 return -EINVAL;
6192
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006193 switch (config.tx_type) {
6194 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006195 tsync_tx_ctl = 0;
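		/* fall through */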
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006196 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006197 break;
6198 default:
6199 return -ERANGE;
6200 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006201
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006202 switch (config.rx_filter) {
6203 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006204 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006205 break;
6206 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6207 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6208 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6209 case HWTSTAMP_FILTER_ALL:
6210 /*
6211 * register TSYNCRXCFG must be set, therefore it is not
6212 * possible to time stamp both Sync and Delay_Req messages
6213 * => fall back to time stamping all packets
6214 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006215 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006216 config.rx_filter = HWTSTAMP_FILTER_ALL;
6217 break;
6218 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006219 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006220 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006221 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006222 break;
6223 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006224 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006225 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006226 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006227 break;
6228 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6229 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006230 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006231 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006232 is_l2 = true;
6233 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006234 config.rx_filter = HWTSTAMP_FILTER_SOME;
6235 break;
6236 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6237 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006238 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006239 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006240 is_l2 = true;
6241 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006242 config.rx_filter = HWTSTAMP_FILTER_SOME;
6243 break;
6244 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6245 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6246 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006247 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006248 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006249 is_l2 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006250 break;
6251 default:
6252 return -ERANGE;
6253 }
6254
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006255 if (hw->mac.type == e1000_82575) {
6256 if (tsync_rx_ctl | tsync_tx_ctl)
6257 return -EINVAL;
6258 return 0;
6259 }
6260
Nick Nunley757b77e2010-03-26 11:36:47 +00006261 /*
6262 * Per-packet timestamping only works if all packets are
6263 * timestamped, so enable timestamping in all packets as
6264 * long as one rx filter was configured.
6265 */
6266 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
6267 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6268 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6269 }
6270
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006271 /* enable/disable TX */
6272 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006273 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6274 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006275 wr32(E1000_TSYNCTXCTL, regval);
6276
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006277 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006278 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006279 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6280 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006281 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006282
6283 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006284 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6285
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006286 /* define ethertype filter for timestamped packets */
6287 if (is_l2)
6288 wr32(E1000_ETQF(3),
6289 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6290 E1000_ETQF_1588 | /* enable timestamping */
6291 ETH_P_1588)); /* 1588 eth protocol type */
6292 else
6293 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006294
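/* PTP event messages (e.g. Sync, Delay_Req) are addressed to UDP port 319 */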
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006295#define PTP_PORT 319
6296 /* L4 Queue Filter[3]: filter by destination port and protocol */
6297 if (is_l4) {
6298 u32 ftqf = (IPPROTO_UDP /* UDP */
6299 | E1000_FTQF_VF_BP /* VF not compared */
6300 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6301 | E1000_FTQF_MASK); /* mask all inputs */
6302 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006303
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006304 wr32(E1000_IMIR(3), htons(PTP_PORT));
6305 wr32(E1000_IMIREXT(3),
6306 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6307 if (hw->mac.type == e1000_82576) {
6308 /* enable source port check */
6309 wr32(E1000_SPQF(3), htons(PTP_PORT));
6310 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6311 }
6312 wr32(E1000_FTQF(3), ftqf);
6313 } else {
6314 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6315 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006316 wrfl();
6317
6318 adapter->hwtstamp_config = config;
6319
6320 /* clear TX/RX time stamp registers, just to be sure */
6321 regval = rd32(E1000_TXSTMPH);
6322 regval = rd32(E1000_RXSTMPH);
6323
6324 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6325 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006326}
6327
6328/**
Auke Kok9d5c8242008-01-24 02:22:38 -08006329 * igb_ioctl - dispatch device-specific ioctls
 6330 * @netdev: network interface device structure
 6331 * @ifr: pointer to the ioctl request
 6332 * @cmd: ioctl command
6333 **/
6334static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6335{
6336 switch (cmd) {
6337 case SIOCGMIIPHY:
6338 case SIOCGMIIREG:
6339 case SIOCSMIIREG:
6340 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006341 case SIOCSHWTSTAMP:
6342 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006343 default:
6344 return -EOPNOTSUPP;
6345 }
6346}
6347
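/* helpers that let the shared MAC code access PCIe capability registers
 * through the PCI layer
 */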
Alexander Duyck009bc062009-07-23 18:08:35 +00006348s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6349{
6350 struct igb_adapter *adapter = hw->back;
6351 u16 cap_offset;
6352
Jon Masonbdaae042011-06-27 07:44:01 +00006353 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006354 if (!cap_offset)
6355 return -E1000_ERR_CONFIG;
6356
6357 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6358
6359 return 0;
6360}
6361
6362s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6363{
6364 struct igb_adapter *adapter = hw->back;
6365 u16 cap_offset;
6366
Jon Masonbdaae042011-06-27 07:44:01 +00006367 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006368 if (!cap_offset)
6369 return -E1000_ERR_CONFIG;
6370
6371 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6372
6373 return 0;
6374}
6375
static void igb_vlan_mode(struct net_device *netdev, u32 features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);

	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

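	/* The check above guarantees that spd is even (10/100/1000) and
	 * dplx is 0 or 1 (DUPLEX_HALF/DUPLEX_FULL), so spd + dplx encodes
	 * each valid pair uniquely, e.g. SPEED_100 + DUPLEX_FULL = 101.
	 */
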
	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
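	/* e.g. MAC 00:11:22:33:44:55 yields rar_low = 0x33221100 and
	 * rar_high = 0x00005544 before the valid and pool bits are set
	 */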

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;
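	/* Judging from the two encodings above, the 82575 stores the queue
	 * select as a numeric field while the later MACs set one pool bit
	 * per pool; this is inferred from the code, not from the datasheets.
	 */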

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address table
	 * and move towards the first entry, so a collision should not be
	 * possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
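		/* Worked example with hypothetical values: link_speed = 1000,
		 * tx_rate = 300 gives rf_int = 3 and a remainder of 100, so
		 * rf_dec = 100 * 2^RF_INT_SHIFT / 300; the resulting fixed
		 * point factor approximates link_speed / tx_rate = 3.33.
		 */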

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			       E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
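		/* fall through: the 82576 also needs the vlan tag stripping
		 * enabled in the 82580 case below */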
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
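		/* fall through */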
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
						adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */