/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 0
#define BUILD 6
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
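/* With the values above, DRV_VERSION expands to the string "3.0.6-k". */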
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
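/*
 * Illustrative usage (an assumption, not part of the original source): the
 * VF count is supplied at module load time, e.g. "modprobe igb max_vfs=2",
 * and the driver consumes the value while probing the device.
 */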

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = igb_suspend,
	.resume = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45   40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
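/*
 * Worked example of the composition above (illustrative; assumes
 * IGB_82580_TSYNC_SHIFT == 24): on pre-82580 parts shift stays 0, so the
 * result is simply ((u64)SYSTIMH << 32) | SYSTIML.  On the 82580 the upper
 * 24 bits of SYSTIMR land in bits 23:0, SYSTIML in bits 55:24 and the low
 * 8 bits of SYSTIMH in bits 63:56.
 */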

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
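/*
 * Worked examples (illustrative): Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8,
 * Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, ...  With rbase_offset added in
 * igb_cache_ring_register() below, e.g. 6 allocated VFs leave the PF rings
 * on hardware queues 6 and 14 while the VFs use the pairs {0,8}..{5,13}.
 */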
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset of in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
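/*
 * Worked examples for the callers below (illustrative, derived from the
 * index/offset math in igb_assign_vector() rather than from datasheet text):
 *
 *   82576, rx reg_idx 10:  row = 10 & 0x7 = 2, offset = (10 & 0x8) << 1 = 16
 *   82580/i350, rx reg_idx 5:  row = 5 >> 1 = 2, offset = (5 & 0x1) << 4 = 16
 *
 * Tx causes use the same row but land 8 bits further into the register.
 */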

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

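		/*
		 * Name the vector after the netdev and the rings it will
		 * service; illustrative results (not from the original
		 * source) are "eth0-TxRx-0", "eth0-tx-1" or "eth0-rx-1",
		 * as later seen in /proc/interrupts.
		 */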
		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}
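/*
 * Illustrative vector budget (not from the original source): with
 * rss_queues = 4, no VFs and IGB_FLAG_QUEUE_PAIRS clear, the function above
 * requests 4 Rx + 4 Tx + 1 link-status = 9 MSI-X vectors and sets
 * num_q_vectors = 8; with queue pairing enabled it requests 5 vectors and
 * sets num_q_vectors = 4.
 */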

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
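/*
 * Illustrative mapping outcomes (not from the original source): with 4 Rx
 * and 4 Tx queues and 8 q_vectors, every ring gets its own vector; with
 * only 4 q_vectors the else branch pairs Tx ring i and Rx ring i on
 * vector i, so each vector services one Tx/Rx queue pair.
 */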
1203
1204/**
1205 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1206 *
1207 * This function initializes the interrupts and allocates all of the queues.
1208 **/
1209static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1210{
1211 struct pci_dev *pdev = adapter->pdev;
1212 int err;
1213
Ben Hutchings21adef32010-09-27 08:28:39 +00001214 err = igb_set_interrupt_capability(adapter);
1215 if (err)
1216 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001217
1218 err = igb_alloc_q_vectors(adapter);
1219 if (err) {
1220 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1221 goto err_alloc_q_vectors;
1222 }
1223
1224 err = igb_alloc_queues(adapter);
1225 if (err) {
1226 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1227 goto err_alloc_queues;
1228 }
1229
1230 err = igb_map_ring_to_vector(adapter);
1231 if (err) {
1232 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1233 goto err_map_queues;
1234 }
1235
1236
1237 return 0;
1238err_map_queues:
1239 igb_free_queues(adapter);
1240err_alloc_queues:
1241 igb_free_q_vectors(adapter);
1242err_alloc_q_vectors:
1243 igb_reset_interrupt_capability(adapter);
1244 return err;
1245}
1246
1247/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001248 * igb_request_irq - initialize interrupts
1249 *
1250 * Attempts to configure interrupts using the best available
1251 * capabilities of the hardware and kernel.
1252 **/
1253static int igb_request_irq(struct igb_adapter *adapter)
1254{
1255 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001256 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001257 int err = 0;
1258
	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
				 q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * We need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers, and clearing the bits can cause
	 * issues for the VF drivers, so we only clear what we set.
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
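	/* wait for any in-flight interrupt handlers to finish before
	 * returning to the caller */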
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

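/**
 * igb_update_mng_vlan - keep the VLAN filter table in sync with the
 *	manageability firmware's VLAN cookie
 * @adapter: board private structure
 *
 * Adds the firmware's manageability VLAN to the filter table and removes
 * the previous one, unless the stack still has that VLAN in active use.
 **/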
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
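		/* For example, assuming a 9000-byte MTU (a max_frame_size of
		 * 9022 including the VLAN header), min_tx_space works out to
		 * (9022 + 16 - 4) * 2 = 18068 bytes, rounded up to 18 KB,
		 * and min_rx_space to 9022 bytes, rounded up to 9 KB. */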
		pba = rd32(E1000_PBA);
		/* upper 16 bits hold the Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits hold the Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about the Tx
		 * packet, but doesn't include the Ethernet FCS because the
		 * hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
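	/* e.g. with a 34 KB Rx FIFO and a 1522-byte max frame this is
	 * min(34816 * 9 / 10, 34816 - 2 * 1522) = min(31334, 31772),
	 * so hwm ends up as 31334 bytes */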
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");
	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/*
			 * The DMA Coalescing high water mark needs to be
			 * higher than the Rx threshold. The Rx threshold is
			 * currently pba - 6, so use a high water mark of
			 * pba - 4.
			 */
			hwm = (pba - 4) << 10;

			reg = (((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
			       & E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = +-1000 usec in 32 usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable coalescing
			 * (smart fifo) - UTRESH=0 */
			wr32(E1000_DMCRTRH, 0);

			/* set the DMA Coalescing high water mark computed
			 * above */
			wr32(E1000_FCRTC, hwm);

			/*
			 * This sets the time to wait before requesting
			 * transition to low power state to the number of
			 * usecs needed to receive 1 512-byte frame at
			 * gigabit line rate.
			 */
			reg = rd32(E1000_DMCTLX);
			reg |= IGB_DMCTLX_DCFLUSH_DIS;

			/* Delay 255 usec before entering Lx state. */
			reg |= 0xFF;
			wr32(E1000_DMCTLX, reg);

			/* free space in Tx packet buffer to wake from DMAC */
			wr32(E1000_DMCTXTH,
			     (IGB_MIN_TXPBSIZE -
			      (IGB_TX_BUF_4096 + adapter->max_frame_size))
			     >> 6);

			/* make low power state decision controlled by DMAC */
			reg = rd32(E1000_PCIEMISC);
			reg |= E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* end if IGB_FLAG_DMAC set */
	}
	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static u32 igb_fix_features(struct net_device *netdev, u32 features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int igb_set_features(struct net_device *netdev, u32 features)
{
	u32 changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	return 0;
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
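	/* prefer 64-bit DMA; if either the streaming or the coherent mask
	 * cannot be satisfied, fall back to 32-bit before giving up */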
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/*
	 * features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_IPV6_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter.
	 */
	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
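	/* the "%d" is expanded by register_netdev() to the first free
	 * ethN unit number */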
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		  "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		  "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable the watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
					   sizeof(struct vf_data_storage),
					   GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
				"Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;
		dev_info(&pdev->dev, "%d vfs allocated\n",
			 adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
		/* DMA Coalescing is not supported in IOV mode. */
		if (adapter->flags & IGB_FLAG_DMAC)
			adapter->flags &= ~IGB_FLAG_DMAC;
	}
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted. Instead we need to shift
		 * the registers to generate a 64bit timer value. As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits. As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
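		/*
		 * Concretely: with mult = 1 and shift = 19, each 16 ns
		 * SYSTIM update adds 16 << 19 to the counter, and the
		 * timecounter converts back as ns = (cycles * mult) >> 19,
		 * so (16 << 19) >> 19 = 16 ns per update.
		 */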
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->node = -1;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
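	/* with queue pairs each q_vector services one Tx and one Rx ring,
	 * so e.g. eight paired queues consume eight MSI-X vectors instead
	 * of sixteen */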

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size;

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002625 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2626 if (!tx_ring->tx_buffer_info)
2627 tx_ring->tx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002628 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002629 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002630
2631 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002632 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002633 tx_ring->size = ALIGN(tx_ring->size, 4096);
2634
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002635 set_dev_node(dev, tx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002636 tx_ring->desc = dma_alloc_coherent(dev,
2637 tx_ring->size,
2638 &tx_ring->dma,
2639 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002640 set_dev_node(dev, orig_node);
2641 if (!tx_ring->desc)
2642 tx_ring->desc = dma_alloc_coherent(dev,
2643 tx_ring->size,
2644 &tx_ring->dma,
2645 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002646
2647 if (!tx_ring->desc)
2648 goto err;
2649
Auke Kok9d5c8242008-01-24 02:22:38 -08002650 tx_ring->next_to_use = 0;
2651 tx_ring->next_to_clean = 0;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002652
Auke Kok9d5c8242008-01-24 02:22:38 -08002653 return 0;
2654
2655err:
Alexander Duyck06034642011-08-26 07:44:22 +00002656 vfree(tx_ring->tx_buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002657 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002658 "Unable to allocate memory for the transmit descriptor ring\n");
2659 return -ENOMEM;
2660}
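
/*
 * Editor's sketch, not part of this driver: the allocation pattern in
 * igb_setup_tx_resources() above tries the ring's preferred NUMA node
 * first and falls back to any node, and pads the descriptor area to a
 * 4 KiB boundary before it is DMA-mapped. Assumes <linux/vmalloc.h>
 * and <linux/kernel.h>; the helper name is hypothetical.
 */
static void *example_alloc_node_first(size_t size, int numa_node)
{
	void *buf = vzalloc_node(size, numa_node);	/* node-local first */

	if (!buf)
		buf = vzalloc(size);	/* any node beats failing outright */
	return buf;
}
/*
 * And the descriptor sizing used above, worked for a 256-entry ring:
 * 256 * sizeof(union e1000_adv_tx_desc) = 256 * 16 = 4096 bytes, which
 * ALIGN(..., 4096) leaves unchanged; 257 entries would round up to 8192.
 */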
2661
2662/**
2663 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2664 * (Descriptors) for all queues
2665 * @adapter: board private structure
2666 *
2667 * Return 0 on success, negative on failure
2668 **/
2669static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2670{
Alexander Duyck439705e2009-10-27 23:49:20 +00002671 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002672 int i, err = 0;
2673
2674 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002675 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002676 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002677 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002678 "Allocation for Tx Queue %u failed\n", i);
2679 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002680 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002681 break;
2682 }
2683 }
2684
2685 return err;
2686}
2687
2688/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002689 * igb_setup_tctl - configure the transmit control registers
2690 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002691 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002692void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002693{
Auke Kok9d5c8242008-01-24 02:22:38 -08002694 struct e1000_hw *hw = &adapter->hw;
2695 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002696
Alexander Duyck85b430b2009-10-27 15:50:29 +00002697 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2698 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002699
2700 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002701 tctl = rd32(E1000_TCTL);
2702 tctl &= ~E1000_TCTL_CT;
2703 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2704 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2705
2706 igb_config_collision_dist(hw);
2707
Auke Kok9d5c8242008-01-24 02:22:38 -08002708 /* Enable transmits */
2709 tctl |= E1000_TCTL_EN;
2710
2711 wr32(E1000_TCTL, tctl);
2712}
2713
2714/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002715 * igb_configure_tx_ring - Configure transmit ring after Reset
2716 * @adapter: board private structure
2717 * @ring: tx ring to configure
2718 *
2719 * Configure a transmit ring after a reset.
2720 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002721void igb_configure_tx_ring(struct igb_adapter *adapter,
2722 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002723{
2724 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002725 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002726 u64 tdba = ring->dma;
2727 int reg_idx = ring->reg_idx;
2728
2729 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002730 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002731 wrfl();
2732 mdelay(10);
2733
2734 wr32(E1000_TDLEN(reg_idx),
2735 ring->count * sizeof(union e1000_adv_tx_desc));
2736 wr32(E1000_TDBAL(reg_idx),
2737 tdba & 0x00000000ffffffffULL);
2738 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2739
Alexander Duyckfce99e32009-10-27 15:51:27 +00002740 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002741 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002742 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002743
2744 txdctl |= IGB_TX_PTHRESH;
2745 txdctl |= IGB_TX_HTHRESH << 8;
2746 txdctl |= IGB_TX_WTHRESH << 16;
2747
2748 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2749 wr32(E1000_TXDCTL(reg_idx), txdctl);
2750}
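
/*
 * Editor's sketch, not part of this driver: igb_configure_tx_ring()
 * above packs three thresholds into TXDCTL at bit offsets 0, 8 and 16,
 * then sets the queue-enable bit. The helper and threshold values are
 * made up; the real values come from the IGB_TX_*THRESH constants.
 */
static u32 example_pack_txdctl(u32 pthresh, u32 hthresh, u32 wthresh)
{
	u32 txdctl = pthresh;		/* bits  5:0  prefetch threshold   */

	txdctl |= hthresh << 8;		/* bits 13:8  host threshold       */
	txdctl |= wthresh << 16;	/* bits 21:16 write-back threshold */
	return txdctl | E1000_TXDCTL_QUEUE_ENABLE;
}
/*
 * e.g. example_pack_txdctl(8, 1, 1) == 0x02010108, assuming
 * E1000_TXDCTL_QUEUE_ENABLE is bit 25 (0x02000000).
 */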
2751
2752/**
2753 * igb_configure_tx - Configure transmit Unit after Reset
2754 * @adapter: board private structure
2755 *
2756 * Configure the Tx unit of the MAC after a reset.
2757 **/
2758static void igb_configure_tx(struct igb_adapter *adapter)
2759{
2760 int i;
2761
2762 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002763 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002764}
2765
2766/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002767 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002768 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2769 *
2770 * Returns 0 on success, negative on failure
2771 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002772int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002773{
Alexander Duyck59d71982010-04-27 13:09:25 +00002774 struct device *dev = rx_ring->dev;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002775 int orig_node = dev_to_node(dev);
Auke Kok9d5c8242008-01-24 02:22:38 -08002776 int size, desc_len;
2777
Alexander Duyck06034642011-08-26 07:44:22 +00002778 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002779 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2780 if (!rx_ring->rx_buffer_info)
2781 rx_ring->rx_buffer_info = vzalloc(size);
Alexander Duyck06034642011-08-26 07:44:22 +00002782 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002783 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002784
2785 desc_len = sizeof(union e1000_adv_rx_desc);
2786
2787 /* Round up to nearest 4K */
2788 rx_ring->size = rx_ring->count * desc_len;
2789 rx_ring->size = ALIGN(rx_ring->size, 4096);
2790
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002791 set_dev_node(dev, rx_ring->numa_node);
Alexander Duyck59d71982010-04-27 13:09:25 +00002792 rx_ring->desc = dma_alloc_coherent(dev,
2793 rx_ring->size,
2794 &rx_ring->dma,
2795 GFP_KERNEL);
Alexander Duyck81c2fc22011-08-26 07:45:20 +00002796 set_dev_node(dev, orig_node);
2797 if (!rx_ring->desc)
2798 rx_ring->desc = dma_alloc_coherent(dev,
2799 rx_ring->size,
2800 &rx_ring->dma,
2801 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002802
2803 if (!rx_ring->desc)
2804 goto err;
2805
2806 rx_ring->next_to_clean = 0;
2807 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002808
Auke Kok9d5c8242008-01-24 02:22:38 -08002809 return 0;
2810
2811err:
Alexander Duyck06034642011-08-26 07:44:22 +00002812 vfree(rx_ring->rx_buffer_info);
2813 rx_ring->rx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002814	dev_err(dev,
2815		"Unable to allocate memory for the receive descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002816 return -ENOMEM;
2817}
2818
2819/**
2820 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2821 * (Descriptors) for all queues
2822 * @adapter: board private structure
2823 *
2824 * Return 0 on success, negative on failure
2825 **/
2826static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2827{
Alexander Duyck439705e2009-10-27 23:49:20 +00002828 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002829 int i, err = 0;
2830
2831 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002832 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002833 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002834 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002835 "Allocation for Rx Queue %u failed\n", i);
2836 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002837 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002838 break;
2839 }
2840 }
2841
2842 return err;
2843}
2844
2845/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002846 * igb_setup_mrqc - configure the multiple receive queue control registers
2847 * @adapter: Board private structure
2848 **/
2849static void igb_setup_mrqc(struct igb_adapter *adapter)
2850{
2851 struct e1000_hw *hw = &adapter->hw;
2852 u32 mrqc, rxcsum;
2853 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2854 union e1000_reta {
2855 u32 dword;
2856 u8 bytes[4];
2857 } reta;
2858 static const u8 rsshash[40] = {
2859 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2860 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2861 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2862 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2863
2864 /* Fill out hash function seeds */
2865 for (j = 0; j < 10; j++) {
2866 u32 rsskey = rsshash[(j * 4)];
2867 rsskey |= rsshash[(j * 4) + 1] << 8;
2868 rsskey |= rsshash[(j * 4) + 2] << 16;
2869 rsskey |= rsshash[(j * 4) + 3] << 24;
2870 array_wr32(E1000_RSSRK(0), j, rsskey);
2871 }
2872
Alexander Duycka99955f2009-11-12 18:37:19 +00002873 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002874
2875 if (adapter->vfs_allocated_count) {
2876 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2877 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002878 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002879 case e1000_82580:
2880 num_rx_queues = 1;
2881 shift = 0;
2882 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002883 case e1000_82576:
2884 shift = 3;
2885 num_rx_queues = 2;
2886 break;
2887 case e1000_82575:
2888 shift = 2;
2889 shift2 = 6;
2890 default:
2891 break;
2892 }
2893 } else {
2894 if (hw->mac.type == e1000_82575)
2895 shift = 6;
2896 }
2897
2898 for (j = 0; j < (32 * 4); j++) {
2899 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2900 if (shift2)
2901 reta.bytes[j & 3] |= num_rx_queues << shift2;
2902 if ((j & 3) == 3)
2903 wr32(E1000_RETA(j >> 2), reta.dword);
2904 }
2905
2906 /*
2907 * Disable raw packet checksumming so that RSS hash is placed in
2908 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2909 * offloads as they are enabled by default
2910 */
2911 rxcsum = rd32(E1000_RXCSUM);
2912 rxcsum |= E1000_RXCSUM_PCSD;
2913
2914 if (adapter->hw.mac.type >= e1000_82576)
2915 /* Enable Receive Checksum Offload for SCTP */
2916 rxcsum |= E1000_RXCSUM_CRCOFL;
2917
2918 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2919 wr32(E1000_RXCSUM, rxcsum);
2920
2921 /* If VMDq is enabled then we set the appropriate mode for that, else
2922 * we default to RSS so that an RSS hash is calculated per packet even
2923 * if we are only using one queue */
2924 if (adapter->vfs_allocated_count) {
2925 if (hw->mac.type > e1000_82575) {
2926 /* Set the default pool for the PF's first queue */
2927 u32 vtctl = rd32(E1000_VT_CTL);
2928 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2929 E1000_VT_CTL_DISABLE_DEF_POOL);
2930 vtctl |= adapter->vfs_allocated_count <<
2931 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2932 wr32(E1000_VT_CTL, vtctl);
2933 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002934 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002935 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2936 else
2937 mrqc = E1000_MRQC_ENABLE_VMDQ;
2938 } else {
2939 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2940 }
2941 igb_vmm_control(adapter);
2942
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002943 /*
2944 * Generate RSS hash based on TCP port numbers and/or
2945 * IPv4/v6 src and dst addresses since UDP cannot be
2946 * hashed reliably due to IP fragmentation
2947 */
2948 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2949 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2950 E1000_MRQC_RSS_FIELD_IPV6 |
2951 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2952 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002953
2954 wr32(E1000_MRQC, mrqc);
2955}
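
/*
 * Editor's sketch, not part of this driver: the RETA loop in
 * igb_setup_mrqc() above fills the 128-entry redirection table four
 * bytes at a time through a dword-overlay union, and the RSS key is
 * loaded similarly by packing four key bytes little-endian into each
 * of the ten RSSRK registers. With the 82576 VMDq values from the code
 * (num_rx_queues = 2, shift = 3) the entries alternate 0x00, 0x08,
 * 0x00, 0x08, ...; the 82575 additionally ORs in a second copy via
 * shift2. The helper is hypothetical; wr32() is the driver's own
 * register-write macro, which expects a local "hw".
 */
static void example_fill_reta(struct e1000_hw *hw, u32 num_queues, u32 shift)
{
	union { u32 dword; u8 bytes[4]; } reta;
	u32 j;

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_queues) << shift;
		if ((j & 3) == 3)	/* flush one dword per 4 entries */
			wr32(E1000_RETA(j >> 2), reta.dword);
	}
}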
2956
2957/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002958 * igb_setup_rctl - configure the receive control registers
2959 * @adapter: Board private structure
2960 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002961void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002962{
2963 struct e1000_hw *hw = &adapter->hw;
2964 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002965
2966 rctl = rd32(E1000_RCTL);
2967
2968 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002969 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002970
Alexander Duyck69d728b2008-11-25 01:04:03 -08002971 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002972 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002973
Auke Kok87cb7e82008-07-08 15:08:29 -07002974 /*
2975 * enable stripping of CRC. It's unlikely this will break BMC
2976 * redirection as it did with e1000. Newer features require
2977 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002978 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002979 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002980
Alexander Duyck559e9c42009-10-27 23:52:50 +00002981 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002982 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002983
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002984 /* enable LPE to prevent packets larger than max_frame_size */
2985 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002986
Alexander Duyck952f72a2009-10-27 15:51:07 +00002987 /* disable queue 0 to prevent tail write w/o re-config */
2988 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002989
Alexander Duycke1739522009-02-19 20:39:44 -08002990 /* Attention!!! For SR-IOV PF driver operations you must enable
2991 * queue drop for all VF and PF queues to prevent head of line blocking
2992 * if an un-trusted VF does not provide descriptors to hardware.
2993 */
2994 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002995 /* set all queue drop enable bits */
2996 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002997 }
2998
Auke Kok9d5c8242008-01-24 02:22:38 -08002999 wr32(E1000_RCTL, rctl);
3000}
3001
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003002static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3003 int vfn)
3004{
3005 struct e1000_hw *hw = &adapter->hw;
3006 u32 vmolr;
3007
3008	/* if it isn't the PF, check to see if VFs are enabled and
3009	 * increase the size to support VLAN tags */
3010 if (vfn < adapter->vfs_allocated_count &&
3011 adapter->vf_data[vfn].vlans_enabled)
3012 size += VLAN_TAG_SIZE;
3013
3014 vmolr = rd32(E1000_VMOLR(vfn));
3015 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3016 vmolr |= size | E1000_VMOLR_LPE;
3017 wr32(E1000_VMOLR(vfn), vmolr);
3018
3019 return 0;
3020}
3021
Auke Kok9d5c8242008-01-24 02:22:38 -08003022/**
Alexander Duycke1739522009-02-19 20:39:44 -08003023 * igb_rlpml_set - set maximum receive packet size
3024 * @adapter: board private structure
3025 *
3026 * Configure maximum receivable packet size.
3027 **/
3028static void igb_rlpml_set(struct igb_adapter *adapter)
3029{
Alexander Duyck153285f2011-08-26 07:43:32 +00003030 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08003031 struct e1000_hw *hw = &adapter->hw;
3032 u16 pf_id = adapter->vfs_allocated_count;
3033
Alexander Duycke1739522009-02-19 20:39:44 -08003034 if (pf_id) {
3035 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00003036 /*
3037 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3038 * to our max jumbo frame size, in case we need to enable
3039 * jumbo frames on one of the rings later.
3040 * This will not pass over-length frames into the default
3041 * queue because it's gated by the VMOLR.RLPML.
3042 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003043 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08003044 }
3045
3046 wr32(E1000_RLPML, max_frame_size);
3047}
3048
Williams, Mitch A8151d292010-02-10 01:44:24 +00003049static inline void igb_set_vmolr(struct igb_adapter *adapter,
3050 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003051{
3052 struct e1000_hw *hw = &adapter->hw;
3053 u32 vmolr;
3054
3055 /*
3056	 * This register exists only on 82576 and newer, so on older
3057	 * hardware we should exit and do nothing
3058 */
3059 if (hw->mac.type < e1000_82576)
3060 return;
3061
3062 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003063 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3064 if (aupe)
3065 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3066 else
3067 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003068
3069 /* clear all bits that might not be set */
3070 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3071
Alexander Duycka99955f2009-11-12 18:37:19 +00003072 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003073 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3074 /*
3075 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3076 * multicast packets
3077 */
3078 if (vfn <= adapter->vfs_allocated_count)
3079 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3080
3081 wr32(E1000_VMOLR(vfn), vmolr);
3082}
3083
Alexander Duycke1739522009-02-19 20:39:44 -08003084/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003085 * igb_configure_rx_ring - Configure a receive ring after Reset
3086 * @adapter: board private structure
3087 * @ring: receive ring to be configured
3088 *
3089 * Configure the Rx unit of the MAC after a reset.
3090 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003091void igb_configure_rx_ring(struct igb_adapter *adapter,
3092 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003093{
3094 struct e1000_hw *hw = &adapter->hw;
3095 u64 rdba = ring->dma;
3096 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003097 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003098
3099 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003100 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003101
3102 /* Set DMA base address registers */
3103 wr32(E1000_RDBAL(reg_idx),
3104 rdba & 0x00000000ffffffffULL);
3105 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3106 wr32(E1000_RDLEN(reg_idx),
3107 ring->count * sizeof(union e1000_adv_rx_desc));
3108
3109 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003110 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003111 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003112 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003113
Alexander Duyck952f72a2009-10-27 15:51:07 +00003114 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003115 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003116#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003117 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003118#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003119 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003120#endif
Alexander Duyck44390ca2011-08-26 07:43:38 +00003121 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Nick Nunley757b77e2010-03-26 11:36:47 +00003122 if (hw->mac.type == e1000_82580)
3123 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003124 /* Only set Drop Enable if we are supporting multiple queues */
3125 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3126 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003127
3128 wr32(E1000_SRRCTL(reg_idx), srrctl);
3129
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003130 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003131 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003132
Alexander Duyck85b430b2009-10-27 15:50:29 +00003133 rxdctl |= IGB_RX_PTHRESH;
3134 rxdctl |= IGB_RX_HTHRESH << 8;
3135 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003136
3137 /* enable receive descriptor fetching */
3138 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003139 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3140}
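
/*
 * Editor's note, not part of this driver: SRRCTL, written above in
 * igb_configure_rx_ring(), carries the header-buffer size, the
 * packet-buffer size, the descriptor type and the drop-enable bit in
 * one register; the two SHIFT constants convert byte counts into the
 * field encodings their definitions specify. With header splitting,
 * the hardware then DMAs headers into the small per-buffer header area
 * and payload into a half-page buffer. The packing, boiled down:
 *
 *	srrctl  = hdr_len << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
 *	srrctl |= pkt_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 *	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 */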
3141
3142/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003143 * igb_configure_rx - Configure receive Unit after Reset
3144 * @adapter: board private structure
3145 *
3146 * Configure the Rx unit of the MAC after a reset.
3147 **/
3148static void igb_configure_rx(struct igb_adapter *adapter)
3149{
Hannes Eder91075842009-02-18 19:36:04 -08003150 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003151
Alexander Duyck68d480c2009-10-05 06:33:08 +00003152 /* set UTA to appropriate mode */
3153 igb_set_uta(adapter);
3154
Alexander Duyck26ad9172009-10-05 06:32:49 +00003155 /* set the correct pool for the PF default MAC address in entry 0 */
3156 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3157 adapter->vfs_allocated_count);
3158
Alexander Duyck06cf2662009-10-27 15:53:25 +00003159	/* Set up the HW Rx Head and Tail Descriptor Pointers and
3160 * the Base and Length of the Rx Descriptor Ring */
3161 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003162 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003163}
3164
3165/**
3166 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003167 * @tx_ring: Tx descriptor ring for a specific queue
3168 *
3169 * Free all transmit software resources
3170 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003171void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003172{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003173 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003174
Alexander Duyck06034642011-08-26 07:44:22 +00003175 vfree(tx_ring->tx_buffer_info);
3176 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003177
Alexander Duyck439705e2009-10-27 23:49:20 +00003178 /* if not set, then don't free */
3179 if (!tx_ring->desc)
3180 return;
3181
Alexander Duyck59d71982010-04-27 13:09:25 +00003182 dma_free_coherent(tx_ring->dev, tx_ring->size,
3183 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003184
3185 tx_ring->desc = NULL;
3186}
3187
3188/**
3189 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3190 * @adapter: board private structure
3191 *
3192 * Free all transmit software resources
3193 **/
3194static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3195{
3196 int i;
3197
3198 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003199 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003200}
3201
Alexander Duyckebe42d12011-08-26 07:45:09 +00003202void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3203 struct igb_tx_buffer *tx_buffer)
Auke Kok9d5c8242008-01-24 02:22:38 -08003204{
Alexander Duyckebe42d12011-08-26 07:45:09 +00003205 if (tx_buffer->skb) {
3206 dev_kfree_skb_any(tx_buffer->skb);
3207 if (tx_buffer->dma)
3208 dma_unmap_single(ring->dev,
3209 tx_buffer->dma,
3210 tx_buffer->length,
3211 DMA_TO_DEVICE);
3212 } else if (tx_buffer->dma) {
3213 dma_unmap_page(ring->dev,
3214 tx_buffer->dma,
3215 tx_buffer->length,
3216 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003217 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00003218 tx_buffer->next_to_watch = NULL;
3219 tx_buffer->skb = NULL;
3220 tx_buffer->dma = 0;
3221 /* buffer_info must be completely set up in the transmit path */
Auke Kok9d5c8242008-01-24 02:22:38 -08003222}
3223
3224/**
3225 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003226 * @tx_ring: ring to be cleaned
3227 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003228static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003229{
Alexander Duyck06034642011-08-26 07:44:22 +00003230 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003231 unsigned long size;
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00003232 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003233
Alexander Duyck06034642011-08-26 07:44:22 +00003234 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003235 return;
3236 /* Free all the Tx ring sk_buffs */
3237
3238 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003239 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003240 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003241 }
3242
Alexander Duyck06034642011-08-26 07:44:22 +00003243 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3244 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003245
3246 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003247 memset(tx_ring->desc, 0, tx_ring->size);
3248
3249 tx_ring->next_to_use = 0;
3250 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003251}
3252
3253/**
3254 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3255 * @adapter: board private structure
3256 **/
3257static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3258{
3259 int i;
3260
3261 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003262 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003263}
3264
3265/**
3266 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003267 * @rx_ring: ring to clean the resources from
3268 *
3269 * Free all receive software resources
3270 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003271void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003272{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003273 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003274
Alexander Duyck06034642011-08-26 07:44:22 +00003275 vfree(rx_ring->rx_buffer_info);
3276 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003277
Alexander Duyck439705e2009-10-27 23:49:20 +00003278 /* if not set, then don't free */
3279 if (!rx_ring->desc)
3280 return;
3281
Alexander Duyck59d71982010-04-27 13:09:25 +00003282 dma_free_coherent(rx_ring->dev, rx_ring->size,
3283 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003284
3285 rx_ring->desc = NULL;
3286}
3287
3288/**
3289 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3290 * @adapter: board private structure
3291 *
3292 * Free all receive software resources
3293 **/
3294static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3295{
3296 int i;
3297
3298 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003299 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003300}
3301
3302/**
3303 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003304 * @rx_ring: ring to free buffers from
3305 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003306static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003307{
Auke Kok9d5c8242008-01-24 02:22:38 -08003308 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003309 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003310
Alexander Duyck06034642011-08-26 07:44:22 +00003311 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003312 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003313
Auke Kok9d5c8242008-01-24 02:22:38 -08003314 /* Free all the Rx ring sk_buffs */
3315 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003316 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003317 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003318 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003319 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003320 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003321 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003322 buffer_info->dma = 0;
3323 }
3324
3325 if (buffer_info->skb) {
3326 dev_kfree_skb(buffer_info->skb);
3327 buffer_info->skb = NULL;
3328 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003329 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003330 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003331 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003332 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003333 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003334 buffer_info->page_dma = 0;
3335 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003336 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003337 put_page(buffer_info->page);
3338 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003339 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003340 }
3341 }
3342
Alexander Duyck06034642011-08-26 07:44:22 +00003343 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3344 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003345
3346 /* Zero out the descriptor ring */
3347 memset(rx_ring->desc, 0, rx_ring->size);
3348
3349 rx_ring->next_to_clean = 0;
3350 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003351}
3352
3353/**
3354 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3355 * @adapter: board private structure
3356 **/
3357static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3358{
3359 int i;
3360
3361 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003362 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003363}
3364
3365/**
3366 * igb_set_mac - Change the Ethernet Address of the NIC
3367 * @netdev: network interface device structure
3368 * @p: pointer to an address structure
3369 *
3370 * Returns 0 on success, negative on failure
3371 **/
3372static int igb_set_mac(struct net_device *netdev, void *p)
3373{
3374 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003375 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003376 struct sockaddr *addr = p;
3377
3378 if (!is_valid_ether_addr(addr->sa_data))
3379 return -EADDRNOTAVAIL;
3380
3381 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003382 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003383
Alexander Duyck26ad9172009-10-05 06:32:49 +00003384 /* set the correct pool for the new PF MAC address in entry 0 */
3385 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3386 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003387
Auke Kok9d5c8242008-01-24 02:22:38 -08003388 return 0;
3389}
3390
3391/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003392 * igb_write_mc_addr_list - write multicast addresses to MTA
3393 * @netdev: network interface device structure
3394 *
3395 * Writes multicast address list to the MTA hash table.
3396 * Returns: -ENOMEM on failure
3397 * 0 on no addresses written
3398 * X on writing X addresses to MTA
3399 **/
3400static int igb_write_mc_addr_list(struct net_device *netdev)
3401{
3402 struct igb_adapter *adapter = netdev_priv(netdev);
3403 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003404 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003405 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003406 int i;
3407
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003408 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003409 /* nothing to program, so clear mc list */
3410 igb_update_mc_addr_list(hw, NULL, 0);
3411 igb_restore_vf_multicasts(adapter);
3412 return 0;
3413 }
3414
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003415	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003416 if (!mta_list)
3417 return -ENOMEM;
3418
Alexander Duyck68d480c2009-10-05 06:33:08 +00003419 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003420 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003421 netdev_for_each_mc_addr(ha, netdev)
3422 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003423
Alexander Duyck68d480c2009-10-05 06:33:08 +00003424 igb_update_mc_addr_list(hw, mta_list, i);
3425 kfree(mta_list);
3426
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003427 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003428}
3429
3430/**
3431 * igb_write_uc_addr_list - write unicast addresses to RAR table
3432 * @netdev: network interface device structure
3433 *
3434 * Writes unicast address list to the RAR table.
3435 * Returns: -ENOMEM on failure/insufficient address space
3436 * 0 on no addresses written
3437 * X on writing X addresses to the RAR table
3438 **/
3439static int igb_write_uc_addr_list(struct net_device *netdev)
3440{
3441 struct igb_adapter *adapter = netdev_priv(netdev);
3442 struct e1000_hw *hw = &adapter->hw;
3443 unsigned int vfn = adapter->vfs_allocated_count;
3444 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3445 int count = 0;
3446
3447 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003448 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003449 return -ENOMEM;
3450
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003451 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003452 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003453
3454 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003455 if (!rar_entries)
3456 break;
3457 igb_rar_set_qsel(adapter, ha->addr,
3458 rar_entries--,
3459 vfn);
3460 count++;
3461 }
3462 }
3463 /* write the addresses in reverse order to avoid write combining */
3464 for (; rar_entries > 0 ; rar_entries--) {
3465 wr32(E1000_RAH(rar_entries), 0);
3466 wr32(E1000_RAL(rar_entries), 0);
3467 }
3468 wrfl();
3469
3470 return count;
3471}
3472
3473/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003474 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003475 * @netdev: network interface device structure
3476 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003477 * The set_rx_mode entry point is called whenever the unicast or multicast
3478 * address lists or the network interface flags are updated. This routine is
3479 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003480 * promiscuous mode, and all-multi behavior.
3481 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003482static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003483{
3484 struct igb_adapter *adapter = netdev_priv(netdev);
3485 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003486 unsigned int vfn = adapter->vfs_allocated_count;
3487 u32 rctl, vmolr = 0;
3488 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003489
3490 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003491 rctl = rd32(E1000_RCTL);
3492
Alexander Duyck68d480c2009-10-05 06:33:08 +00003493	/* clear the affected bits */
3494 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3495
Patrick McHardy746b9f02008-07-16 20:15:45 -07003496 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003497 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003498 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003499 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003500 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003501 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003502 vmolr |= E1000_VMOLR_MPME;
3503 } else {
3504 /*
3505		 * Write addresses to the MTA; if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003506 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003507 * that we can at least receive multicast traffic
3508 */
3509 count = igb_write_mc_addr_list(netdev);
3510 if (count < 0) {
3511 rctl |= E1000_RCTL_MPE;
3512 vmolr |= E1000_VMOLR_MPME;
3513 } else if (count) {
3514 vmolr |= E1000_VMOLR_ROMPE;
3515 }
3516 }
3517 /*
3518	 * Write addresses to available RAR registers; if there is not
3519 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003520 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003521 */
3522 count = igb_write_uc_addr_list(netdev);
3523 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003524 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003525 vmolr |= E1000_VMOLR_ROPE;
3526 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003527 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003528 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003529 wr32(E1000_RCTL, rctl);
3530
Alexander Duyck68d480c2009-10-05 06:33:08 +00003531 /*
3532	 * In order to support SR-IOV and eventually VMDq, it is necessary to set
3533 * the VMOLR to enable the appropriate modes. Without this workaround
3534 * we will have issues with VLAN tag stripping not being done for frames
3535 * that are only arriving because we are the default pool
3536 */
3537 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003538 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003539
Alexander Duyck68d480c2009-10-05 06:33:08 +00003540 vmolr |= rd32(E1000_VMOLR(vfn)) &
3541 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3542 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003543 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003544}
3545
Greg Rose13800462010-11-06 02:08:26 +00003546static void igb_check_wvbr(struct igb_adapter *adapter)
3547{
3548 struct e1000_hw *hw = &adapter->hw;
3549 u32 wvbr = 0;
3550
3551 switch (hw->mac.type) {
3552 case e1000_82576:
3553 case e1000_i350:
3554		wvbr = rd32(E1000_WVBR);
3555		if (!wvbr)
3556			return;
		break;
3557 default:
3558 break;
3559 }
3560
3561 adapter->wvbr |= wvbr;
3562}
3563
3564#define IGB_STAGGERED_QUEUE_OFFSET 8
3565
3566static void igb_spoof_check(struct igb_adapter *adapter)
3567{
3568 int j;
3569
3570 if (!adapter->wvbr)
3571 return;
3572
3573	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3574 if (adapter->wvbr & (1 << j) ||
3575 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3576 dev_warn(&adapter->pdev->dev,
3577 "Spoof event(s) detected on VF %d\n", j);
3578 adapter->wvbr &=
3579 ~((1 << j) |
3580 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3581 }
3582 }
3583}
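
/*
 * Editor's sketch, not part of this driver: igb_spoof_check() above
 * treats WVBR as a bitmask with two bits per VF, the second one
 * IGB_STAGGERED_QUEUE_OFFSET (8) positions higher. A hypothetical
 * helper showing the same test for a single VF:
 */
static bool example_vf_spoofed(u32 wvbr, int vf)
{
	return !!(wvbr & ((1 << vf) |
			  (1 << (vf + IGB_STAGGERED_QUEUE_OFFSET))));
}
/* e.g. wvbr == 0x101 reports both bits for VF 0 and none for VF 1. */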
3584
Auke Kok9d5c8242008-01-24 02:22:38 -08003585/* Need to wait a few seconds after link up to get diagnostic information from
3586	 * the PHY */
3587static void igb_update_phy_info(unsigned long data)
3588{
3589 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003590 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003591}
3592
3593/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003594 * igb_has_link - check shared code for link and determine up/down
3595 * @adapter: pointer to driver private info
3596 **/
Nick Nunley31455352010-02-17 01:01:21 +00003597bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003598{
3599 struct e1000_hw *hw = &adapter->hw;
3600 bool link_active = false;
3601 s32 ret_val = 0;
3602
3603 /* get_link_status is set on LSC (link status) interrupt or
3604	 * rx sequence error interrupt. link_active will stay
3605	 * false until e1000_check_for_link establishes link,
3606	 * for copper adapters ONLY
3607 */
3608 switch (hw->phy.media_type) {
3609 case e1000_media_type_copper:
3610 if (hw->mac.get_link_status) {
3611 ret_val = hw->mac.ops.check_for_link(hw);
3612 link_active = !hw->mac.get_link_status;
3613 } else {
3614 link_active = true;
3615 }
3616 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003617 case e1000_media_type_internal_serdes:
3618 ret_val = hw->mac.ops.check_for_link(hw);
3619 link_active = hw->mac.serdes_has_link;
3620 break;
3621 default:
3622 case e1000_media_type_unknown:
3623 break;
3624 }
3625
3626 return link_active;
3627}
3628
Stefan Assmann563988d2011-04-05 04:27:15 +00003629static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3630{
3631 bool ret = false;
3632 u32 ctrl_ext, thstat;
3633
3634 /* check for thermal sensor event on i350, copper only */
3635 if (hw->mac.type == e1000_i350) {
3636 thstat = rd32(E1000_THSTAT);
3637 ctrl_ext = rd32(E1000_CTRL_EXT);
3638
3639 if ((hw->phy.media_type == e1000_media_type_copper) &&
3640 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3641 ret = !!(thstat & event);
3642 }
3643 }
3644
3645 return ret;
3646}
3647
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003648/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003649 * igb_watchdog - Timer Call-back
3650 * @data: pointer to adapter cast into an unsigned long
3651 **/
3652static void igb_watchdog(unsigned long data)
3653{
3654 struct igb_adapter *adapter = (struct igb_adapter *)data;
3655 /* Do the rest outside of interrupt context */
3656 schedule_work(&adapter->watchdog_task);
3657}
3658
3659static void igb_watchdog_task(struct work_struct *work)
3660{
3661 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003662 struct igb_adapter,
3663 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003664 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003665 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003666 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003667 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003668
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003669 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003670 if (link) {
3671 if (!netif_carrier_ok(netdev)) {
3672 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003673 hw->mac.ops.get_speed_and_duplex(hw,
3674 &adapter->link_speed,
3675 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003676
3677 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003678			/* Link status message must follow this format */
3679 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003680 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003681 netdev->name,
3682 adapter->link_speed,
3683 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003684 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003685 ((ctrl & E1000_CTRL_TFCE) &&
3686 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3687 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3688 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003689
Stefan Assmann563988d2011-04-05 04:27:15 +00003690 /* check for thermal sensor event */
3691 if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
3692 printk(KERN_INFO "igb: %s The network adapter "
3693 "link speed was downshifted "
3694 "because it overheated.\n",
3695 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003696 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003697
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003698 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003699 adapter->tx_timeout_factor = 1;
3700 switch (adapter->link_speed) {
3701 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003702 adapter->tx_timeout_factor = 14;
3703 break;
3704 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003705 /* maybe add some timeout factor ? */
3706 break;
3707 }
3708
3709 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003710
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003711 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003712 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003713
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003714 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003715 if (!test_bit(__IGB_DOWN, &adapter->state))
3716 mod_timer(&adapter->phy_info_timer,
3717 round_jiffies(jiffies + 2 * HZ));
3718 }
3719 } else {
3720 if (netif_carrier_ok(netdev)) {
3721 adapter->link_speed = 0;
3722 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003723
3724 /* check for thermal sensor event */
3725 if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
3726 printk(KERN_ERR "igb: %s The network adapter "
3727 "was stopped because it "
3728 "overheated.\n",
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003729 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003730 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003731
Alexander Duyck527d47c2008-11-27 00:21:39 -08003732			/* Link status message must follow this format */
3733 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3734 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003735 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003736
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003737 igb_ping_all_vfs(adapter);
3738
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003739 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003740 if (!test_bit(__IGB_DOWN, &adapter->state))
3741 mod_timer(&adapter->phy_info_timer,
3742 round_jiffies(jiffies + 2 * HZ));
3743 }
3744 }
3745
Eric Dumazet12dcd862010-10-15 17:27:10 +00003746 spin_lock(&adapter->stats64_lock);
3747 igb_update_stats(adapter, &adapter->stats64);
3748 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003749
Alexander Duyckdbabb062009-11-12 18:38:16 +00003750 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003751 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003752 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003753 /* We've lost link, so the controller stops DMA,
3754 * but we've got queued Tx work that's never going
3755 * to get done, so reset controller to flush Tx.
3756 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003757 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3758 adapter->tx_timeout_count++;
3759 schedule_work(&adapter->reset_task);
3760 /* return immediately since reset is imminent */
3761 return;
3762 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003763 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003764
Alexander Duyckdbabb062009-11-12 18:38:16 +00003765 /* Force detection of hung controller every watchdog period */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00003766 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyckdbabb062009-11-12 18:38:16 +00003767 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003768
Auke Kok9d5c8242008-01-24 02:22:38 -08003769 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003770 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003771 u32 eics = 0;
3772 for (i = 0; i < adapter->num_q_vectors; i++) {
3773 struct igb_q_vector *q_vector = adapter->q_vector[i];
3774 eics |= q_vector->eims_value;
3775 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003776 wr32(E1000_EICS, eics);
3777 } else {
3778 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3779 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003780
Greg Rose13800462010-11-06 02:08:26 +00003781 igb_spoof_check(adapter);
3782
Auke Kok9d5c8242008-01-24 02:22:38 -08003783 /* Reset the timer */
3784 if (!test_bit(__IGB_DOWN, &adapter->state))
3785 mod_timer(&adapter->watchdog_timer,
3786 round_jiffies(jiffies + 2 * HZ));
3787}
3788
3789enum latency_range {
3790 lowest_latency = 0,
3791 low_latency = 1,
3792 bulk_latency = 2,
3793 latency_invalid = 255
3794};
3795
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003796/**
3797 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3798 *
3799 * Stores a new ITR value based strictly on packet size. This
3800 * algorithm is less sophisticated than that used in igb_update_itr,
3801 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003802 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003803 * were determined based on theoretical maximum wire speed and testing
3804 * data, in order to minimize response time while increasing bulk
3805 * throughput.
3806 * This functionality is controlled by the InterruptThrottleRate module
3807 * parameter (see igb_param.c)
3808 * NOTE: This function is called only when operating in a multiqueue
3809 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003810 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003811 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003812static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003813{
Alexander Duyck047e0032009-10-27 15:49:27 +00003814 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003815 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003816 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003817 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003818
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003819 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3820 * ints/sec - ITR timer value of 120 ticks.
3821 */
3822 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003823 new_val = IGB_4K_ITR;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003824 goto set_itr_val;
3825 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003826
Alexander Duyck0ba82992011-08-26 07:45:47 +00003827 packets = q_vector->rx.total_packets;
3828 if (packets)
3829 avg_wire_size = q_vector->rx.total_bytes / packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003830
Alexander Duyck0ba82992011-08-26 07:45:47 +00003831 packets = q_vector->tx.total_packets;
3832 if (packets)
3833 avg_wire_size = max_t(u32, avg_wire_size,
3834 q_vector->tx.total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003835
3836 /* if avg_wire_size isn't set no work was done */
3837 if (!avg_wire_size)
3838 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003839
3840 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3841 avg_wire_size += 24;
3842
3843 /* Don't starve jumbo frames */
3844 avg_wire_size = min(avg_wire_size, 3000);
3845
3846 /* Give a little boost to mid-size frames */
3847 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3848 new_val = avg_wire_size / 3;
3849 else
3850 new_val = avg_wire_size / 2;
3851
Alexander Duyck0ba82992011-08-26 07:45:47 +00003852 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3853 if (new_val < IGB_20K_ITR &&
3854 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3855 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3856 new_val = IGB_20K_ITR;
Nick Nunleyabe1c362010-02-17 01:03:19 +00003857
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003858set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003859 if (new_val != q_vector->itr_val) {
3860 q_vector->itr_val = new_val;
3861 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003862 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003863clear_counts:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003864 q_vector->rx.total_bytes = 0;
3865 q_vector->rx.total_packets = 0;
3866 q_vector->tx.total_bytes = 0;
3867 q_vector->tx.total_packets = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003868}
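
/*
 * Editor's sketch, not part of this driver: the heuristic in
 * igb_update_ring_itr() above, boiled down, with a made-up sample.
 * 500 packets totalling 300000 bytes give avg_wire_size = 600; adding
 * 24 bytes of CRC/preamble/gap makes 624, which lands in the mid-size
 * band (300..1200), so the new value is 624 / 3 = 208.
 */
static int example_ring_itr(unsigned int bytes, unsigned int packets)
{
	int avg_wire_size;

	if (!packets)
		return 0;			/* no work was done */
	avg_wire_size = bytes / packets;
	avg_wire_size += 24;			/* CRC + preamble + IFG */
	avg_wire_size = min(avg_wire_size, 3000); /* don't starve jumbos */
	if (avg_wire_size > 300 && avg_wire_size < 1200)
		return avg_wire_size / 3;	/* boost mid-size frames */
	return avg_wire_size / 2;
}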
3869
3870/**
3871 * igb_update_itr - update the dynamic ITR value based on statistics
3872 * Stores a new ITR value based on packets and byte
3873 * counts during the last interrupt. The advantage of per interrupt
3874 * computation is faster updates and more accurate ITR for the current
3875 * traffic pattern. Constants in this function were computed
3876 * based on theoretical maximum wire speed and thresholds were set based
3877 * on testing data as well as attempting to minimize response time
3878 * while increasing bulk throughput.
3879 * This functionality is controlled by the InterruptThrottleRate module
3880 * parameter (see igb_param.c)
3881 * NOTE: These calculations are only valid when operating in a single-
3882 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003883 * @q_vector: pointer to q_vector
3884 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003885 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003886static void igb_update_itr(struct igb_q_vector *q_vector,
3887 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003888{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003889 unsigned int packets = ring_container->total_packets;
3890 unsigned int bytes = ring_container->total_bytes;
3891 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003892
Alexander Duyck0ba82992011-08-26 07:45:47 +00003893 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003894 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003895 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003896
Alexander Duyck0ba82992011-08-26 07:45:47 +00003897 switch (itrval) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003898 case lowest_latency:
3899 /* handle TSO and jumbo frames */
3900 if (bytes/packets > 8000)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003901 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003902 else if ((packets < 5) && (bytes > 512))
Alexander Duyck0ba82992011-08-26 07:45:47 +00003903 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003904 break;
3905 case low_latency: /* 50 usec aka 20000 ints/s */
3906 if (bytes > 10000) {
3907 /* this if handles the TSO accounting */
3908 if (bytes/packets > 8000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003909 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003910 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003911 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003912		} else if (packets > 35) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003913 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003914 }
3915 } else if (bytes/packets > 2000) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003916 itrval = bulk_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003917 } else if (packets <= 2 && bytes < 512) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003918 itrval = lowest_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003919 }
3920 break;
3921 case bulk_latency: /* 250 usec aka 4000 ints/s */
3922 if (bytes > 25000) {
3923 if (packets > 35)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003924 itrval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003925 } else if (bytes < 1500) {
Alexander Duyck0ba82992011-08-26 07:45:47 +00003926 itrval = low_latency;
Auke Kok9d5c8242008-01-24 02:22:38 -08003927 }
3928 break;
3929 }
3930
Alexander Duyck0ba82992011-08-26 07:45:47 +00003931 /* clear work counters since we have the values we need */
3932 ring_container->total_bytes = 0;
3933 ring_container->total_packets = 0;
3934
3935 /* write updated itr to ring container */
3936 ring_container->itr = itrval;
Auke Kok9d5c8242008-01-24 02:22:38 -08003937}
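
/*
 * Example transitions through igb_update_itr() (illustrative numbers):
 * from low_latency, an interrupt that saw 4 packets / 40000 bytes has
 * bytes/packets = 10000 > 8000, is treated as TSO/jumbo traffic, and
 * drops the ring to bulk_latency.  From bulk_latency, 40 packets /
 * 26000 bytes (bytes > 25000 and packets > 35) steps back up to
 * low_latency.
 */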
3938
Alexander Duyck0ba82992011-08-26 07:45:47 +00003939static void igb_set_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003940{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003941 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00003942 u32 new_itr = q_vector->itr_val;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003943 u8 current_itr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003944
3945 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3946 if (adapter->link_speed != SPEED_1000) {
3947 current_itr = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00003948 new_itr = IGB_4K_ITR;
Auke Kok9d5c8242008-01-24 02:22:38 -08003949 goto set_itr_now;
3950 }
3951
Alexander Duyck0ba82992011-08-26 07:45:47 +00003952 igb_update_itr(q_vector, &q_vector->tx);
3953 igb_update_itr(q_vector, &q_vector->rx);
Auke Kok9d5c8242008-01-24 02:22:38 -08003954
Alexander Duyck0ba82992011-08-26 07:45:47 +00003955 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003956
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003957 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck0ba82992011-08-26 07:45:47 +00003958 if (current_itr == lowest_latency &&
3959 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3960 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003961 current_itr = low_latency;
3962
Auke Kok9d5c8242008-01-24 02:22:38 -08003963 switch (current_itr) {
3964 /* counts and packets in update_itr are dependent on these numbers */
3965 case lowest_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003966 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003967 break;
3968 case low_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003969 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003970 break;
3971 case bulk_latency:
Alexander Duyck0ba82992011-08-26 07:45:47 +00003972 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003973 break;
3974 default:
3975 break;
3976 }
3977
3978set_itr_now:
Alexander Duyck047e0032009-10-27 15:49:27 +00003979 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003980 /* this attempts to bias the interrupt rate towards Bulk
3981 * by adding intermediate steps when interrupt rate is
3982 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003983 new_itr = new_itr > q_vector->itr_val ?
3984 max((new_itr * q_vector->itr_val) /
3985 (new_itr + (q_vector->itr_val >> 2)),
Alexander Duyck0ba82992011-08-26 07:45:47 +00003986 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003987 new_itr;
3988 /* Don't write the value here; it resets the adapter's
3989 * internal timer, and causes us to delay far longer than
3990 * we should between interrupts. Instead, we write the ITR
3991 * value at the beginning of the next interrupt so the timing
3992 * ends up being correct.
3993 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003994 q_vector->itr_val = new_itr;
3995 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003996 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003997}
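
/*
 * Worked example for the damping expression in igb_set_itr() (values
 * assume the usual igb.h definitions IGB_20K_ITR = 196 and
 * IGB_4K_ITR = 980): moving from 20K ints/s towards 4K, the damped
 * candidate is (980 * 196) / (980 + 196/4) = 192080 / 1029 = 186, and
 * max(186, 980) selects 980, so the larger interval is taken at once;
 * in the other direction the ternary returns new_itr directly.
 */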
3998
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00003999void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4000 u32 type_tucmd, u32 mss_l4len_idx)
4001{
4002 struct e1000_adv_tx_context_desc *context_desc;
4003 u16 i = tx_ring->next_to_use;
4004
4005 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4006
4007 i++;
4008 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4009
4010 /* set bits to identify this as an advanced context descriptor */
4011 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4012
4013 /* For 82575, context index must be unique per ring. */
Alexander Duyck866cff02011-08-26 07:45:36 +00004014 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004015 mss_l4len_idx |= tx_ring->reg_idx << 4;
4016
4017 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4018 context_desc->seqnum_seed = 0;
4019 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4020 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4021}
4022
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004023static int igb_tso(struct igb_ring *tx_ring,
4024 struct igb_tx_buffer *first,
4025 u8 *hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004026{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004027 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004028 u32 vlan_macip_lens, type_tucmd;
4029 u32 mss_l4len_idx, l4len;
4030
4031 if (!skb_is_gso(skb))
4032 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004033
4034 if (skb_header_cloned(skb)) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004035 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004036 if (err)
4037 return err;
4038 }
4039
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004040 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4041 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
Auke Kok9d5c8242008-01-24 02:22:38 -08004042
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004043 if (first->protocol == __constant_htons(ETH_P_IP)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004044 struct iphdr *iph = ip_hdr(skb);
4045 iph->tot_len = 0;
4046 iph->check = 0;
4047 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4048 iph->daddr, 0,
4049 IPPROTO_TCP,
4050 0);
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004051 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004052 first->tx_flags |= IGB_TX_FLAGS_TSO |
4053 IGB_TX_FLAGS_CSUM |
4054 IGB_TX_FLAGS_IPV4;
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08004055 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004056 ipv6_hdr(skb)->payload_len = 0;
4057 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4058 &ipv6_hdr(skb)->daddr,
4059 0, IPPROTO_TCP, 0);
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004060 first->tx_flags |= IGB_TX_FLAGS_TSO |
4061 IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004062 }
4063
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004064 /* compute header lengths */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004065 l4len = tcp_hdrlen(skb);
4066 *hdr_len = skb_transport_offset(skb) + l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08004067
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004068 /* update gso size and bytecount with header size */
4069 first->gso_segs = skb_shinfo(skb)->gso_segs;
4070 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4071
Auke Kok9d5c8242008-01-24 02:22:38 -08004072 /* MSS L4LEN IDX */
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004073 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4074 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
Auke Kok9d5c8242008-01-24 02:22:38 -08004075
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004076 /* VLAN MACLEN IPLEN */
4077 vlan_macip_lens = skb_network_header_len(skb);
4078 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004079 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004080
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004081 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004082
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004083 return 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004084}
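
/*
 * Illustrative field packing for a TSO TCP/IPv4 frame (assumed header
 * sizes, not taken from a trace): an untagged Ethernet header (network
 * offset 14), a 20-byte IP header and a 20-byte TCP header with
 * gso_size = 1448 give l4len = 20 and *hdr_len = 34 + 20 = 54, so
 *   mss_l4len_idx   = (20 << E1000_ADVTXD_L4LEN_SHIFT) |
 *                     (1448 << E1000_ADVTXD_MSS_SHIFT)
 *   vlan_macip_lens = 20 | (14 << E1000_ADVTXD_MACLEN_SHIFT)
 */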
4085
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004086static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
Auke Kok9d5c8242008-01-24 02:22:38 -08004087{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004088 struct sk_buff *skb = first->skb;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004089 u32 vlan_macip_lens = 0;
4090 u32 mss_l4len_idx = 0;
4091 u32 type_tucmd = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004092
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004093 if (skb->ip_summed != CHECKSUM_PARTIAL) {
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004094 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4095 return;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004096 } else {
4097 u8 l4_hdr = 0;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004098 switch (first->protocol) {
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004099 case __constant_htons(ETH_P_IP):
4100 vlan_macip_lens |= skb_network_header_len(skb);
4101 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4102 l4_hdr = ip_hdr(skb)->protocol;
4103 break;
4104 case __constant_htons(ETH_P_IPV6):
4105 vlan_macip_lens |= skb_network_header_len(skb);
4106 l4_hdr = ipv6_hdr(skb)->nexthdr;
4107 break;
4108 default:
4109 if (unlikely(net_ratelimit())) {
4110 dev_warn(tx_ring->dev,
4111 "partial checksum but proto=%x!\n",
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004112 first->protocol);
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07004113 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004114 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08004115 }
4116
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004117 switch (l4_hdr) {
4118 case IPPROTO_TCP:
4119 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4120 mss_l4len_idx = tcp_hdrlen(skb) <<
4121 E1000_ADVTXD_L4LEN_SHIFT;
4122 break;
4123 case IPPROTO_SCTP:
4124 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4125 mss_l4len_idx = sizeof(struct sctphdr) <<
4126 E1000_ADVTXD_L4LEN_SHIFT;
4127 break;
4128 case IPPROTO_UDP:
4129 mss_l4len_idx = sizeof(struct udphdr) <<
4130 E1000_ADVTXD_L4LEN_SHIFT;
4131 break;
4132 default:
4133 if (unlikely(net_ratelimit())) {
4134 dev_warn(tx_ring->dev,
4135 "partial checksum but l4 proto=%x!\n",
4136 l4_hdr);
4137 }
4138 break;
4139 }
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004140
4141 /* update TX checksum flag */
4142 first->tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004143 }
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004144
4145 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004146 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004147
4148 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
Auke Kok9d5c8242008-01-24 02:22:38 -08004149}
4150
Alexander Duycke032afc2011-08-26 07:44:48 +00004151static __le32 igb_tx_cmd_type(u32 tx_flags)
4152{
4153 /* set type for advanced descriptor with frame checksum insertion */
4154 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4155 E1000_ADVTXD_DCMD_IFCS |
4156 E1000_ADVTXD_DCMD_DEXT);
4157
4158 /* set HW vlan bit if vlan is present */
4159 if (tx_flags & IGB_TX_FLAGS_VLAN)
4160 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4161
4162 /* set timestamp bit if present */
4163 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4164 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4165
4166 /* set segmentation bits for TSO */
4167 if (tx_flags & IGB_TX_FLAGS_TSO)
4168 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4169
4170 return cmd_type;
4171}
4172
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004173static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4174 union e1000_adv_tx_desc *tx_desc,
4175 u32 tx_flags, unsigned int paylen)
Alexander Duycke032afc2011-08-26 07:44:48 +00004176{
4177 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4178
4179 /* 82575 requires a unique index per ring if any offload is enabled */
4180 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
Alexander Duyck866cff02011-08-26 07:45:36 +00004181 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
Alexander Duycke032afc2011-08-26 07:44:48 +00004182 olinfo_status |= tx_ring->reg_idx << 4;
4183
4184 /* insert L4 checksum */
4185 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4186 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4187
4188 /* insert IPv4 checksum */
4189 if (tx_flags & IGB_TX_FLAGS_IPV4)
4190 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4191 }
4192
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004193 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duycke032afc2011-08-26 07:44:48 +00004194}
4195
Alexander Duyckebe42d12011-08-26 07:45:09 +00004196/*
4197 * The largest size we can write to the descriptor is 65535. In order to
4198 * maintain a power of two alignment we have to limit ourselves to 32K.
4199 */
4200#define IGB_MAX_TXD_PWR 15
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004201#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
Auke Kok9d5c8242008-01-24 02:22:38 -08004202
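/*
 * Minimal sketch (not part of the driver): how many data descriptors a
 * single buffer consumes under the 32K cap enforced by igb_tx_map()
 * below, e.g. a 100000-byte fragment needs (100000 + 32767) >> 15 = 4.
 */
static inline u16 igb_txd_count_sketch(unsigned int size)
{
	/* equivalent to DIV_ROUND_UP(size, IGB_MAX_DATA_PER_TXD) */
	return (size + IGB_MAX_DATA_PER_TXD - 1) >> IGB_MAX_TXD_PWR;
}
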
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004203static void igb_tx_map(struct igb_ring *tx_ring,
4204 struct igb_tx_buffer *first,
Alexander Duyckebe42d12011-08-26 07:45:09 +00004205 const u8 hdr_len)
Auke Kok9d5c8242008-01-24 02:22:38 -08004206{
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004207 struct sk_buff *skb = first->skb;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004208 struct igb_tx_buffer *tx_buffer_info;
4209 union e1000_adv_tx_desc *tx_desc;
4210 dma_addr_t dma;
4211 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4212 unsigned int data_len = skb->data_len;
4213 unsigned int size = skb_headlen(skb);
4214 unsigned int paylen = skb->len - hdr_len;
4215 __le32 cmd_type;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004216 u32 tx_flags = first->tx_flags;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004217 u16 i = tx_ring->next_to_use;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004218
4219 tx_desc = IGB_TX_DESC(tx_ring, i);
4220
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004221 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004222 cmd_type = igb_tx_cmd_type(tx_flags);
4223
4224 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4225 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004226 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004227
Alexander Duyckebe42d12011-08-26 07:45:09 +00004228 /* record length, and DMA address */
4229 first->length = size;
4230 first->dma = dma;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004231 tx_desc->read.buffer_addr = cpu_to_le64(dma);
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00004232
Alexander Duyckebe42d12011-08-26 07:45:09 +00004233 for (;;) {
4234 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4235 tx_desc->read.cmd_type_len =
4236 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004237
Alexander Duyckebe42d12011-08-26 07:45:09 +00004238 i++;
4239 tx_desc++;
4240 if (i == tx_ring->count) {
4241 tx_desc = IGB_TX_DESC(tx_ring, 0);
4242 i = 0;
4243 }
4244
4245 dma += IGB_MAX_DATA_PER_TXD;
4246 size -= IGB_MAX_DATA_PER_TXD;
4247
4248 tx_desc->read.olinfo_status = 0;
4249 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4250 }
4251
4252 if (likely(!data_len))
4253 break;
4254
4255 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4256
Alexander Duyck65689fe2009-03-20 00:17:43 +00004257 i++;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004258 tx_desc++;
4259 if (i == tx_ring->count) {
4260 tx_desc = IGB_TX_DESC(tx_ring, 0);
Alexander Duyck65689fe2009-03-20 00:17:43 +00004261 i = 0;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004262 }
Alexander Duyck65689fe2009-03-20 00:17:43 +00004263
Alexander Duyckebe42d12011-08-26 07:45:09 +00004264 size = frag->size;
4265 data_len -= size;
4266
4267 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4268 size, DMA_TO_DEVICE);
4269 if (dma_mapping_error(tx_ring->dev, dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004270 goto dma_error;
4271
Alexander Duyckebe42d12011-08-26 07:45:09 +00004272 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4273 tx_buffer_info->length = size;
4274 tx_buffer_info->dma = dma;
4275
4276 tx_desc->read.olinfo_status = 0;
4277 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4278
4279 frag++;
Auke Kok9d5c8242008-01-24 02:22:38 -08004280 }
4281
Alexander Duyckebe42d12011-08-26 07:45:09 +00004282 /* write last descriptor with RS and EOP bits */
4283 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
4284 tx_desc->read.cmd_type_len = cmd_type;
Alexander Duyck8542db02011-08-26 07:44:43 +00004285
4286 /* set the timestamp */
4287 first->time_stamp = jiffies;
4288
Alexander Duyckebe42d12011-08-26 07:45:09 +00004289 /*
4290 * Force memory writes to complete before letting h/w know there
4291 * are new descriptors to fetch. (Only applicable for weak-ordered
4292 * memory model archs, such as IA-64).
4293 *
4294 * We also need this memory barrier to make certain all of the
4295 * status bits have been updated before next_to_watch is written.
4296 */
Auke Kok9d5c8242008-01-24 02:22:38 -08004297 wmb();
4298
Alexander Duyckebe42d12011-08-26 07:45:09 +00004299 /* set next_to_watch value indicating a packet is present */
4300 first->next_to_watch = tx_desc;
4301
4302 i++;
4303 if (i == tx_ring->count)
4304 i = 0;
4305
Auke Kok9d5c8242008-01-24 02:22:38 -08004306 tx_ring->next_to_use = i;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004307
Alexander Duyckfce99e32009-10-27 15:51:27 +00004308 writel(i, tx_ring->tail);
Alexander Duyckebe42d12011-08-26 07:45:09 +00004309
Auke Kok9d5c8242008-01-24 02:22:38 -08004310 /* we need this if more than one processor can write to our tail
 4311	 * at a time; it synchronizes IO on IA64/Altix systems */
4312 mmiowb();
Alexander Duyckebe42d12011-08-26 07:45:09 +00004313
4314 return;
4315
4316dma_error:
4317 dev_err(tx_ring->dev, "TX DMA map failed\n");
4318
4319 /* clear dma mappings for failed tx_buffer_info map */
4320 for (;;) {
4321 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4322 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4323 if (tx_buffer_info == first)
4324 break;
4325 if (i == 0)
4326 i = tx_ring->count;
4327 i--;
4328 }
4329
4330 tx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004331}
4332
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004333static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004334{
Alexander Duycke694e962009-10-27 15:53:06 +00004335 struct net_device *netdev = tx_ring->netdev;
4336
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004337 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004338
Auke Kok9d5c8242008-01-24 02:22:38 -08004339 /* Herbert's original patch had:
4340 * smp_mb__after_netif_stop_queue();
4341 * but since that doesn't exist yet, just open code it. */
4342 smp_mb();
4343
4344 /* We need to check again in a case another CPU has just
4345 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004346 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004347 return -EBUSY;
4348
4349 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004350 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004351
4352 u64_stats_update_begin(&tx_ring->tx_syncp2);
4353 tx_ring->tx_stats.restart_queue2++;
4354 u64_stats_update_end(&tx_ring->tx_syncp2);
4355
Auke Kok9d5c8242008-01-24 02:22:38 -08004356 return 0;
4357}
4358
Alexander Duyck6ad4edf2011-08-26 07:45:26 +00004359static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004360{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004361 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004362 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004363 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004364}
4365
Alexander Duyckcd392f52011-08-26 07:43:59 +00004366netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4367 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004368{
Alexander Duyck8542db02011-08-26 07:44:43 +00004369 struct igb_tx_buffer *first;
Alexander Duyckebe42d12011-08-26 07:45:09 +00004370 int tso;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004371 u32 tx_flags = 0;
Alexander Duyck31f6adb2011-08-26 07:44:53 +00004372 __be16 protocol = vlan_get_protocol(skb);
Nick Nunley91d4ee32010-02-17 01:04:56 +00004373 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004374
Auke Kok9d5c8242008-01-24 02:22:38 -08004375 /* need: 1 descriptor per page,
4376 * + 2 desc gap to keep tail from touching head,
4377 * + 1 desc for skb->data,
4378 * + 1 desc for context descriptor,
4379 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004380 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004381 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004382 return NETDEV_TX_BUSY;
4383 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004384
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004385 /* record the location of the first descriptor for this packet */
4386 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4387 first->skb = skb;
4388 first->bytecount = skb->len;
4389 first->gso_segs = 1;
4390
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004391 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4392 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004393 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004394 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004395
Jesse Grosseab6d182010-10-20 13:56:03 +00004396 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004397 tx_flags |= IGB_TX_FLAGS_VLAN;
4398 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4399 }
4400
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004401 /* record initial flags and protocol */
4402 first->tx_flags = tx_flags;
4403 first->protocol = protocol;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004404
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004405 tso = igb_tso(tx_ring, first, &hdr_len);
4406 if (tso < 0)
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004407 goto out_drop;
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004408 else if (!tso)
4409 igb_tx_csum(tx_ring, first);
Auke Kok9d5c8242008-01-24 02:22:38 -08004410
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004411 igb_tx_map(tx_ring, first, hdr_len);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004412
4413 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004414 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004415
Auke Kok9d5c8242008-01-24 02:22:38 -08004416 return NETDEV_TX_OK;
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004417
4418out_drop:
Alexander Duyck7af40ad92011-08-26 07:45:15 +00004419 igb_unmap_and_free_tx_resource(tx_ring, first);
4420
Alexander Duyck7d13a7d2011-08-26 07:44:32 +00004421 return NETDEV_TX_OK;
Auke Kok9d5c8242008-01-24 02:22:38 -08004422}
4423
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004424static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4425 struct sk_buff *skb)
4426{
4427 unsigned int r_idx = skb->queue_mapping;
4428
4429 if (r_idx >= adapter->num_tx_queues)
4430 r_idx = r_idx % adapter->num_tx_queues;
4431
4432 return adapter->tx_ring[r_idx];
4433}
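
/*
 * Example (illustrative): with num_tx_queues = 4, an skb carrying
 * queue_mapping = 6 is sent on tx_ring[6 % 4] = tx_ring[2].
 */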
4434
Alexander Duyckcd392f52011-08-26 07:43:59 +00004435static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4436 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004437{
4438 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004439
4440 if (test_bit(__IGB_DOWN, &adapter->state)) {
4441 dev_kfree_skb_any(skb);
4442 return NETDEV_TX_OK;
4443 }
4444
4445 if (skb->len <= 0) {
4446 dev_kfree_skb_any(skb);
4447 return NETDEV_TX_OK;
4448 }
4449
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004450 /*
4451 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4452 * in order to meet this minimum size requirement.
4453 */
4454 if (skb->len < 17) {
4455 if (skb_padto(skb, 17))
4456 return NETDEV_TX_OK;
4457 skb->len = 17;
4458 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004459
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00004460 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
Auke Kok9d5c8242008-01-24 02:22:38 -08004461}
4462
4463/**
4464 * igb_tx_timeout - Respond to a Tx Hang
4465 * @netdev: network interface device structure
4466 **/
4467static void igb_tx_timeout(struct net_device *netdev)
4468{
4469 struct igb_adapter *adapter = netdev_priv(netdev);
4470 struct e1000_hw *hw = &adapter->hw;
4471
4472 /* Do the reset outside of interrupt context */
4473 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004474
Alexander Duyck55cac242009-11-19 12:42:21 +00004475 if (hw->mac.type == e1000_82580)
4476 hw->dev_spec._82575.global_device_reset = true;
4477
Auke Kok9d5c8242008-01-24 02:22:38 -08004478 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004479 wr32(E1000_EICS,
4480 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004481}
4482
4483static void igb_reset_task(struct work_struct *work)
4484{
4485 struct igb_adapter *adapter;
4486 adapter = container_of(work, struct igb_adapter, reset_task);
4487
Taku Izumic97ec422010-04-27 14:39:30 +00004488 igb_dump(adapter);
4489 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004490 igb_reinit_locked(adapter);
4491}
4492
4493/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004494 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004495 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004496 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004497 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004498 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004499static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4500 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004501{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004502 struct igb_adapter *adapter = netdev_priv(netdev);
4503
4504 spin_lock(&adapter->stats64_lock);
4505 igb_update_stats(adapter, &adapter->stats64);
4506 memcpy(stats, &adapter->stats64, sizeof(*stats));
4507 spin_unlock(&adapter->stats64_lock);
4508
4509 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004510}
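
/*
 * These counters are what userspace reads over rtnetlink, e.g. the
 * byte/packet columns of "ip -s link show dev eth0" (interface name
 * illustrative).
 */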
4511
4512/**
4513 * igb_change_mtu - Change the Maximum Transfer Unit
4514 * @netdev: network interface device structure
4515 * @new_mtu: new value for maximum frame size
4516 *
4517 * Returns 0 on success, negative on failure
4518 **/
4519static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4520{
4521 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004522 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck153285f2011-08-26 07:43:32 +00004523 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08004524
Alexander Duyckc809d222009-10-27 23:52:13 +00004525 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004526 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004527 return -EINVAL;
4528 }
4529
Alexander Duyck153285f2011-08-26 07:43:32 +00004530#define MAX_STD_JUMBO_FRAME_SIZE 9238
Auke Kok9d5c8242008-01-24 02:22:38 -08004531 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004532 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004533 return -EINVAL;
4534 }
4535
4536 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4537 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004538
Auke Kok9d5c8242008-01-24 02:22:38 -08004539 /* igb_down has a dependency on max_frame_size */
4540 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004541
Alexander Duyck4c844852009-10-27 15:52:07 +00004542 if (netif_running(netdev))
4543 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004544
Alexander Duyck090b1792009-10-27 23:51:55 +00004545 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004546 netdev->mtu, new_mtu);
4547 netdev->mtu = new_mtu;
4548
4549 if (netif_running(netdev))
4550 igb_up(adapter);
4551 else
4552 igb_reset(adapter);
4553
4554 clear_bit(__IGB_RESETTING, &adapter->state);
4555
4556 return 0;
4557}
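
/*
 * Worked example for the max_frame computation above: the default MTU
 * of 1500 gives 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
 * = 1522 bytes, and the largest accepted MTU of 9216 gives exactly the
 * 9238-byte MAX_STD_JUMBO_FRAME_SIZE checked above.
 */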
4558
4559/**
4560 * igb_update_stats - Update the board statistics counters
4561 * @adapter: board private structure
4562 **/
4563
Eric Dumazet12dcd862010-10-15 17:27:10 +00004564void igb_update_stats(struct igb_adapter *adapter,
4565 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004566{
4567 struct e1000_hw *hw = &adapter->hw;
4568 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004569 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004570 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004571 int i;
4572 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004573 unsigned int start;
4574 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004575
4576#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4577
4578 /*
4579 * Prevent stats update while adapter is being reset, or if the pci
4580 * connection is down.
4581 */
4582 if (adapter->link_speed == 0)
4583 return;
4584 if (pci_channel_offline(pdev))
4585 return;
4586
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004587 bytes = 0;
4588 packets = 0;
4589 for (i = 0; i < adapter->num_rx_queues; i++) {
4590 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00004591 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004592
Alexander Duyck3025a442010-02-17 01:02:39 +00004593 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004594 net_stats->rx_fifo_errors += rqdpc_tmp;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004595
4596 do {
4597 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4598 _bytes = ring->rx_stats.bytes;
4599 _packets = ring->rx_stats.packets;
4600 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4601 bytes += _bytes;
4602 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004603 }
4604
Alexander Duyck128e45e2009-11-12 18:37:38 +00004605 net_stats->rx_bytes = bytes;
4606 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004607
4608 bytes = 0;
4609 packets = 0;
4610 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004611 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004612 do {
4613 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4614 _bytes = ring->tx_stats.bytes;
4615 _packets = ring->tx_stats.packets;
4616 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4617 bytes += _bytes;
4618 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004619 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004620 net_stats->tx_bytes = bytes;
4621 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004622
4623 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004624 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4625 adapter->stats.gprc += rd32(E1000_GPRC);
4626 adapter->stats.gorc += rd32(E1000_GORCL);
4627 rd32(E1000_GORCH); /* clear GORCL */
4628 adapter->stats.bprc += rd32(E1000_BPRC);
4629 adapter->stats.mprc += rd32(E1000_MPRC);
4630 adapter->stats.roc += rd32(E1000_ROC);
4631
4632 adapter->stats.prc64 += rd32(E1000_PRC64);
4633 adapter->stats.prc127 += rd32(E1000_PRC127);
4634 adapter->stats.prc255 += rd32(E1000_PRC255);
4635 adapter->stats.prc511 += rd32(E1000_PRC511);
4636 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4637 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4638 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4639 adapter->stats.sec += rd32(E1000_SEC);
4640
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004641 mpc = rd32(E1000_MPC);
4642 adapter->stats.mpc += mpc;
4643 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004644 adapter->stats.scc += rd32(E1000_SCC);
4645 adapter->stats.ecol += rd32(E1000_ECOL);
4646 adapter->stats.mcc += rd32(E1000_MCC);
4647 adapter->stats.latecol += rd32(E1000_LATECOL);
4648 adapter->stats.dc += rd32(E1000_DC);
4649 adapter->stats.rlec += rd32(E1000_RLEC);
4650 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4651 adapter->stats.xontxc += rd32(E1000_XONTXC);
4652 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4653 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4654 adapter->stats.fcruc += rd32(E1000_FCRUC);
4655 adapter->stats.gptc += rd32(E1000_GPTC);
4656 adapter->stats.gotc += rd32(E1000_GOTCL);
4657 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004658 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004659 adapter->stats.ruc += rd32(E1000_RUC);
4660 adapter->stats.rfc += rd32(E1000_RFC);
4661 adapter->stats.rjc += rd32(E1000_RJC);
4662 adapter->stats.tor += rd32(E1000_TORH);
4663 adapter->stats.tot += rd32(E1000_TOTH);
4664 adapter->stats.tpr += rd32(E1000_TPR);
4665
4666 adapter->stats.ptc64 += rd32(E1000_PTC64);
4667 adapter->stats.ptc127 += rd32(E1000_PTC127);
4668 adapter->stats.ptc255 += rd32(E1000_PTC255);
4669 adapter->stats.ptc511 += rd32(E1000_PTC511);
4670 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4671 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4672
4673 adapter->stats.mptc += rd32(E1000_MPTC);
4674 adapter->stats.bptc += rd32(E1000_BPTC);
4675
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004676 adapter->stats.tpt += rd32(E1000_TPT);
4677 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004678
4679 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004680 /* read internal phy specific stats */
4681 reg = rd32(E1000_CTRL_EXT);
4682 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4683 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4684 adapter->stats.tncrs += rd32(E1000_TNCRS);
4685 }
4686
Auke Kok9d5c8242008-01-24 02:22:38 -08004687 adapter->stats.tsctc += rd32(E1000_TSCTC);
4688 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4689
4690 adapter->stats.iac += rd32(E1000_IAC);
4691 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4692 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4693 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4694 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4695 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4696 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4697 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4698 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4699
4700 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004701 net_stats->multicast = adapter->stats.mprc;
4702 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004703
4704 /* Rx Errors */
4705
4706 /* RLEC on some newer hardware can be incorrect so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004707 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004708 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004709 adapter->stats.crcerrs + adapter->stats.algnerrc +
4710 adapter->stats.ruc + adapter->stats.roc +
4711 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004712 net_stats->rx_length_errors = adapter->stats.ruc +
4713 adapter->stats.roc;
4714 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4715 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4716 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004717
4718 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004719 net_stats->tx_errors = adapter->stats.ecol +
4720 adapter->stats.latecol;
4721 net_stats->tx_aborted_errors = adapter->stats.ecol;
4722 net_stats->tx_window_errors = adapter->stats.latecol;
4723 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004724
4725 /* Tx Dropped needs to be maintained elsewhere */
4726
4727 /* Phy Stats */
4728 if (hw->phy.media_type == e1000_media_type_copper) {
4729 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004730 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004731 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4732 adapter->phy_stats.idle_errors += phy_tmp;
4733 }
4734 }
4735
4736 /* Management Stats */
4737 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4738 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4739 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004740
4741 /* OS2BMC Stats */
4742 reg = rd32(E1000_MANC);
4743 if (reg & E1000_MANC_EN_BMC2OS) {
4744 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4745 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4746 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4747 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4748 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004749}
4750
Auke Kok9d5c8242008-01-24 02:22:38 -08004751static irqreturn_t igb_msix_other(int irq, void *data)
4752{
Alexander Duyck047e0032009-10-27 15:49:27 +00004753 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004754 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004755 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004756 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004757
Alexander Duyck7f081d42010-01-07 17:41:00 +00004758 if (icr & E1000_ICR_DRSTA)
4759 schedule_work(&adapter->reset_task);
4760
Alexander Duyck047e0032009-10-27 15:49:27 +00004761 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004762 /* HW is reporting DMA is out of sync */
4763 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004764		/* The DMA Out of Sync is also an indication of a spoof event
4765 * in IOV mode. Check the Wrong VM Behavior register to
4766 * see if it is really a spoof event. */
4767 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004768 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004769
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004770 /* Check for a mailbox event */
4771 if (icr & E1000_ICR_VMMB)
4772 igb_msg_task(adapter);
4773
4774 if (icr & E1000_ICR_LSC) {
4775 hw->mac.get_link_status = 1;
4776 /* guard against interrupt when we're going down */
4777 if (!test_bit(__IGB_DOWN, &adapter->state))
4778 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4779 }
4780
Alexander Duyck25568a52009-10-27 23:49:59 +00004781 if (adapter->vfs_allocated_count)
4782 wr32(E1000_IMS, E1000_IMS_LSC |
4783 E1000_IMS_VMMB |
4784 E1000_IMS_DOUTSYNC);
4785 else
4786 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004787 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004788
4789 return IRQ_HANDLED;
4790}
4791
Alexander Duyck047e0032009-10-27 15:49:27 +00004792static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004793{
Alexander Duyck26b39272010-02-17 01:00:41 +00004794 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004795 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004796
Alexander Duyck047e0032009-10-27 15:49:27 +00004797 if (!q_vector->set_itr)
4798 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004799
Alexander Duyck047e0032009-10-27 15:49:27 +00004800 if (!itr_val)
4801 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004802
Alexander Duyck26b39272010-02-17 01:00:41 +00004803 if (adapter->hw.mac.type == e1000_82575)
4804 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004805 else
Alexander Duyck0ba82992011-08-26 07:45:47 +00004806 itr_val |= E1000_EITR_CNT_IGNR;
Alexander Duyck047e0032009-10-27 15:49:27 +00004807
4808 writel(itr_val, q_vector->itr_register);
4809 q_vector->set_itr = 0;
4810}
4811
4812static irqreturn_t igb_msix_ring(int irq, void *data)
4813{
4814 struct igb_q_vector *q_vector = data;
4815
4816 /* Write the ITR value calculated from the previous interrupt. */
4817 igb_write_itr(q_vector);
4818
4819 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004820
Auke Kok9d5c8242008-01-24 02:22:38 -08004821 return IRQ_HANDLED;
4822}
4823
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004824#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00004825static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004826{
Alexander Duyck047e0032009-10-27 15:49:27 +00004827 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004828 struct e1000_hw *hw = &adapter->hw;
4829 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004830
Alexander Duyck047e0032009-10-27 15:49:27 +00004831 if (q_vector->cpu == cpu)
4832 goto out_no_update;
4833
Alexander Duyck0ba82992011-08-26 07:45:47 +00004834 if (q_vector->tx.ring) {
4835 int q = q_vector->tx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004836 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4837 if (hw->mac.type == e1000_82575) {
4838 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4839 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4840 } else {
4841 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4842 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4843 E1000_DCA_TXCTRL_CPUID_SHIFT;
4844 }
4845 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4846 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4847 }
Alexander Duyck0ba82992011-08-26 07:45:47 +00004848 if (q_vector->rx.ring) {
4849 int q = q_vector->rx.ring->reg_idx;
Alexander Duyck047e0032009-10-27 15:49:27 +00004850 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4851 if (hw->mac.type == e1000_82575) {
4852 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4853 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4854 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004855 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004856 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004857 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004858 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004859 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4860 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4861 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4862 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004863 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004864 q_vector->cpu = cpu;
4865out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004866 put_cpu();
4867}
4868
4869static void igb_setup_dca(struct igb_adapter *adapter)
4870{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004871 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004872 int i;
4873
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004874 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004875 return;
4876
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004877 /* Always use CB2 mode, difference is masked in the CB driver. */
4878 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4879
Alexander Duyck047e0032009-10-27 15:49:27 +00004880 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004881 adapter->q_vector[i]->cpu = -1;
4882 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004883 }
4884}
4885
4886static int __igb_notify_dca(struct device *dev, void *data)
4887{
4888 struct net_device *netdev = dev_get_drvdata(dev);
4889 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004890 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004891 struct e1000_hw *hw = &adapter->hw;
4892 unsigned long event = *(unsigned long *)data;
4893
4894 switch (event) {
4895 case DCA_PROVIDER_ADD:
4896 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004897 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004898 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004899 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004900 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004901 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004902 igb_setup_dca(adapter);
4903 break;
4904 }
4905 /* Fall Through since DCA is disabled. */
4906 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004907 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004908 /* without this a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004909 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004910 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004911 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004912 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004913 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004914 }
4915 break;
4916 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004917
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004918 return 0;
4919}
4920
4921static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4922 void *p)
4923{
4924 int ret_val;
4925
4926 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4927 __igb_notify_dca);
4928
4929 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4930}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004931#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004932
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004933static void igb_ping_all_vfs(struct igb_adapter *adapter)
4934{
4935 struct e1000_hw *hw = &adapter->hw;
4936 u32 ping;
4937 int i;
4938
4939 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4940 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004941 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004942 ping |= E1000_VT_MSGTYPE_CTS;
4943 igb_write_mbx(hw, &ping, 1, i);
4944 }
4945}
4946
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004947static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4948{
4949 struct e1000_hw *hw = &adapter->hw;
4950 u32 vmolr = rd32(E1000_VMOLR(vf));
4951 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4952
Alexander Duyckd85b90042010-09-22 17:56:20 +00004953 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004954 IGB_VF_FLAG_MULTI_PROMISC);
4955 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4956
4957 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4958 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00004959 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004960 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4961 } else {
4962 /*
4963 * if we have hashes and we are clearing a multicast promisc
4964 * flag we need to write the hashes to the MTA as this step
4965 * was previously skipped
4966 */
4967 if (vf_data->num_vf_mc_hashes > 30) {
4968 vmolr |= E1000_VMOLR_MPME;
4969 } else if (vf_data->num_vf_mc_hashes) {
4970 int j;
4971 vmolr |= E1000_VMOLR_ROMPE;
4972 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4973 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4974 }
4975 }
4976
4977 wr32(E1000_VMOLR(vf), vmolr);
4978
 4979	/* any flags left unprocessed at this point are unsupported */
4980 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4981 return -EINVAL;
4982
4983 return 0;
4984
4985}
4986
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004987static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4988 u32 *msgbuf, u32 vf)
4989{
4990 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4991 u16 *hash_list = (u16 *)&msgbuf[1];
4992 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4993 int i;
4994
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004995 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004996	 * to this VF for later use; it is restored when the PF
 4997	 * multicast list changes
4998 */
4999 vf_data->num_vf_mc_hashes = n;
5000
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005001 /* only up to 30 hash values supported */
5002 if (n > 30)
5003 n = 30;
5004
5005 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005006 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07005007 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005008
5009 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005010 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005011
5012 return 0;
5013}
5014
5015static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5016{
5017 struct e1000_hw *hw = &adapter->hw;
5018 struct vf_data_storage *vf_data;
5019 int i, j;
5020
5021 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005022 u32 vmolr = rd32(E1000_VMOLR(i));
5023 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5024
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005025 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005026
5027 if ((vf_data->num_vf_mc_hashes > 30) ||
5028 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5029 vmolr |= E1000_VMOLR_MPME;
5030 } else if (vf_data->num_vf_mc_hashes) {
5031 vmolr |= E1000_VMOLR_ROMPE;
5032 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5033 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5034 }
5035 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005036 }
5037}
5038
5039static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5040{
5041 struct e1000_hw *hw = &adapter->hw;
5042 u32 pool_mask, reg, vid;
5043 int i;
5044
5045 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5046
5047 /* Find the vlan filter for this id */
5048 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5049 reg = rd32(E1000_VLVF(i));
5050
5051 /* remove the vf from the pool */
5052 reg &= ~pool_mask;
5053
5054 /* if pool is empty then remove entry from vfta */
5055 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5056 (reg & E1000_VLVF_VLANID_ENABLE)) {
 5057			vid = reg & E1000_VLVF_VLANID_MASK;
 5058			igb_vfta_set(hw, vid, false);
 5059			reg = 0;
5060 }
5061
5062 wr32(E1000_VLVF(i), reg);
5063 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005064
5065 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005066}
5067
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;

				/* first VLAN for this VF, grow its rx max
				 * frame size by the 4 byte VLAN tag */
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;

				/* last VLAN removed, shrink the rx max frame
				 * size back by the 4 byte VLAN tag */
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

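/**
 * igb_set_vmvir - configure the default VLAN tag inserted for a VF
 * @adapter: board private structure
 * @vid: VLAN id to insert by default, 0 disables insertion
 * @vf: VF index whose VMVIR register is written
 **/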
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

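/**
 * igb_ndo_set_vf_vlan - administratively set the VLAN and QOS for a VF
 * @netdev: network interface device structure
 * @vf: VF index
 * @vlan: VLAN id to assign, 0 clears the assignment
 * @qos: 802.1p priority to insert with the tag
 **/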
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

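/**
 * igb_set_vf_vlan - handle a VLAN add/remove request from the VF mailbox
 * @adapter: board private structure
 * @msgbuf: mailbox message containing the requested VLAN id and add flag
 * @vf: VF that sent the request
 **/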
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

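/**
 * igb_vf_reset - restore a VF's offloads, vlans and multicast list to defaults
 * @adapter: board private structure
 * @vf: VF to reset
 **/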
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

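/**
 * igb_vf_reset_event - handle an unsolicited VF reset request
 * @adapter: board private structure
 * @vf: VF that requested the reset
 **/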
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

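/**
 * igb_vf_reset_msg - reset a VF and reply with its MAC address
 * @adapter: board private structure
 * @vf: VF that sent the reset message
 **/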
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

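/**
 * igb_set_vf_mac_addr - handle a MAC address change requested by a VF
 * @adapter: board private structure
 * @msg: mailbox message carrying the new address
 * @vf: VF that sent the request
 **/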
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

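/**
 * igb_rcv_ack_from_vf - process an ack from the VF mailbox
 * @adapter: board private structure
 * @vf: VF that sent the ack
 *
 * A VF that has not completed a reset is not clear to send, so it is
 * NACKed (at most once every two seconds) instead of being serviced.
 **/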
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

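/**
 * igb_rcv_msg_from_vf - read and dispatch a mailbox message from a VF
 * @adapter: board private structure
 * @vf: VF that raised the message interrupt
 **/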
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

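/**
 * igb_msg_task - poll all VF mailboxes for resets, messages and acks
 * @adapter: board private structure
 **/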
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used,
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

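/**
 * igb_ring_irq_enable - update ITR as needed and re-enable the q_vector's IRQ
 * @q_vector: q_vector whose interrupt is re-armed after polling
 **/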
void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions.
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_tx_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
			    struct igb_tx_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	for (; budget; budget--) {
		eop_desc = tx_buffer->next_to_watch;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* retrieve hardware timestamp */
		igb_tx_hwtstamp(q_vector, tx_buffer);

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);
		tx_buffer->skb = NULL;

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 tx_buffer->dma,
				 tx_buffer->length,
				 DMA_TO_DEVICE);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer->dma = 0;

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (tx_buffer->dma) {
				dma_unmap_page(tx_ring->dev,
					       tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			}
		}

		/* clear last DMA location */
		tx_buffer->dma = 0;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}
	}

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		eop_desc = tx_buffer->next_to_watch;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (eop_desc &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				eop_desc,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

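/**
 * igb_rx_checksum - set the skb checksum state from the Rx descriptor
 * @ring: ring the packet was received on
 * @rx_desc: advanced Rx descriptor written back by hardware
 * @skb: packet being handed up the stack
 **/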
static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

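/**
 * igb_rx_hash - copy the RSS hash from the Rx descriptor into the skb
 * @ring: ring the packet was received on
 * @rx_desc: advanced Rx descriptor written back by hardware
 * @skb: packet being handed up the stack
 **/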
static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

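/**
 * igb_rx_hwtstamp - retrieve the Rx hardware time stamp, if any, for an skb
 * @q_vector: q_vector the packet arrived on
 * @rx_desc: advanced Rx descriptor written back by hardware
 * @skb: packet to attach the time stamp to
 **/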
static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
				       E1000_RXDADV_STAT_TS))
		return;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IGB_RX_HDR_LEN)
		hlen = IGB_RX_HDR_LEN;
	return hlen;
}

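/**
 * igb_clean_rx_irq - clean completed descriptors from the Rx ring
 * @q_vector: q_vector being polled
 * @budget: NAPI budget, the maximum number of packets to clean
 * returns true if the budget was not exhausted
 **/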
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	union e1000_adv_rx_desc *rx_desc;
	const int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	u16 i = rx_ring->next_to_clean;

	rx_desc = IGB_RX_DESC(rx_ring, i);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		struct sk_buff *skb = buffer_info->skb;
		union e1000_adv_rx_desc *next_rxd;

		buffer_info->skb = NULL;
		prefetch(skb->data);

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IGB_RX_DESC(rx_ring, i);
		prefetch(next_rxd);

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		if (!skb_is_nonlinear(skb)) {
			__skb_put(skb, igb_get_hlen(rx_desc));
			dma_unmap_single(rx_ring->dev, buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (rx_desc->wb.upper.length) {
			u16 length = le16_to_cpu(rx_desc->wb.upper.length);

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}

		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
			struct igb_rx_buffer *next_buffer;
			next_buffer = &rx_ring->rx_buffer_info[i];
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (igb_test_staterr(rx_desc,
				     E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, rx_desc, skb);
		igb_rx_hash(rx_ring, rx_desc, skb);
		igb_rx_checksum(rx_ring, rx_desc, skb);

		if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
			u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);

			__vlan_hwaccel_put_tag(skb, vid);
		}

		total_bytes += skb->len;
		total_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		napi_gro_receive(&q_vector->napi, skb);

		budget--;
next_desc:
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return !!budget;
}

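/**
 * igb_alloc_mapped_skb - allocate and DMA-map an skb for the header buffer
 * @rx_ring: ring the buffer belongs to
 * @bi: buffer_info entry to populate
 **/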
static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
				 struct igb_rx_buffer *bi)
{
	struct sk_buff *skb = bi->skb;
	dma_addr_t dma = bi->dma;

	if (dma)
		return true;

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IGB_RX_HDR_LEN);
		bi->skb = skb;
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}

		/* initialize skb for ring */
		skb_record_rx_queue(skb, rx_ring->queue_index);
	}

	dma = dma_map_single(rx_ring->dev, skb->data,
			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	return true;
}

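/**
 * igb_alloc_mapped_page - allocate and DMA-map a half page for packet data
 * @rx_ring: ring the buffer belongs to
 * @bi: buffer_info entry to populate
 **/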
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t page_dma = bi->page_dma;
	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);

	if (page_dma)
		return true;

	if (!page) {
		page = netdev_alloc_page(rx_ring->netdev);
		bi->page = page;
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}
	}

	page_dma = dma_map_page(rx_ring->dev, page,
				page_offset, PAGE_SIZE / 2,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, page_dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->page_dma = page_dma;
	bi->page_offset = page_offset;
	return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of the ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info. */
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;
	}

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: ifreq structure carrying the MII data
 * @cmd: ioctl command, one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: ifreq structure carrying the hwtstamp_config
 * @cmd: ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

6350/**
Auke Kok9d5c8242008-01-24 02:22:38 -08006351 * igb_ioctl -
6352 * @netdev:
6353 * @ifreq:
6354 * @cmd:
6355 **/
6356static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6357{
6358 switch (cmd) {
6359 case SIOCGMIIPHY:
6360 case SIOCGMIIREG:
6361 case SIOCSMIIREG:
6362 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006363 case SIOCSHWTSTAMP:
6364 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006365 default:
6366 return -EOPNOTSUPP;
6367 }
6368}
6369
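/* Illustrative note: the two helpers below access PCIe capability
 * registers relative to the capability offset cached in pdev->pcie_cap
 * by the PCI core; callers pass a register offset such as the standard
 * PCI_EXP_DEVCTL define. */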
Alexander Duyck009bc062009-07-23 18:08:35 +00006370s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6371{
6372 struct igb_adapter *adapter = hw->back;
6373 u16 cap_offset;
6374
Jon Masonbdaae042011-06-27 07:44:01 +00006375 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006376 if (!cap_offset)
6377 return -E1000_ERR_CONFIG;
6378
6379 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6380
6381 return 0;
6382}
6383
6384s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6385{
6386 struct igb_adapter *adapter = hw->back;
6387 u16 cap_offset;
6388
Jon Masonbdaae042011-06-27 07:44:01 +00006389 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006390 if (!cap_offset)
6391 return -E1000_ERR_CONFIG;
6392
6393 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6394
6395 return 0;
6396}
6397
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006398static void igb_vlan_mode(struct net_device *netdev, u32 features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006399{
6400 struct igb_adapter *adapter = netdev_priv(netdev);
6401 struct e1000_hw *hw = &adapter->hw;
6402 u32 ctrl, rctl;
Alexander Duyck5faf0302011-08-26 07:46:08 +00006403 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
Auke Kok9d5c8242008-01-24 02:22:38 -08006404
Alexander Duyck5faf0302011-08-26 07:46:08 +00006405 if (enable) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006406 /* enable VLAN tag insert/strip */
6407 ctrl = rd32(E1000_CTRL);
6408 ctrl |= E1000_CTRL_VME;
6409 wr32(E1000_CTRL, ctrl);
6410
Alexander Duyck51466232009-10-27 23:47:35 +00006411 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006412 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006413 rctl &= ~E1000_RCTL_CFIEN;
6414 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006415 } else {
6416 /* disable VLAN tag insert/strip */
6417 ctrl = rd32(E1000_CTRL);
6418 ctrl &= ~E1000_CTRL_VME;
6419 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006420 }
6421
Alexander Duycke1739522009-02-19 20:39:44 -08006422 igb_rlpml_set(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006423}
6424
6425static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6426{
6427 struct igb_adapter *adapter = netdev_priv(netdev);
6428 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006429 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006430
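	/* Illustrative note: the VLVF array maps a VLAN to VF pools, while
	 * the VFTA is the 4096-bit per-VLAN filter table, hence the
	 * two-step add below. */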
Alexander Duyck51466232009-10-27 23:47:35 +00006431 /* attempt to add filter to vlvf array */
6432 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006433
Alexander Duyck51466232009-10-27 23:47:35 +00006434 /* add the filter since PF can receive vlans w/o entry in vlvf */
6435 igb_vfta_set(hw, vid, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006436
6437 set_bit(vid, adapter->active_vlans);
Auke Kok9d5c8242008-01-24 02:22:38 -08006438}
6439
6440static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
6441{
6442 struct igb_adapter *adapter = netdev_priv(netdev);
6443 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006444 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006445 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006446
Alexander Duyck51466232009-10-27 23:47:35 +00006447 /* remove vlan from VLVF table array */
6448 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006449
Alexander Duyck51466232009-10-27 23:47:35 +00006450 /* if vid was not present in VLVF just remove it from table */
6451 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006452 igb_vfta_set(hw, vid, false);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006453
6454 clear_bit(vid, adapter->active_vlans);
Auke Kok9d5c8242008-01-24 02:22:38 -08006455}
6456
6457static void igb_restore_vlan(struct igb_adapter *adapter)
6458{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006459 u16 vid;
Auke Kok9d5c8242008-01-24 02:22:38 -08006460
Alexander Duyck5faf0302011-08-26 07:46:08 +00006461 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6462
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006463 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6464 igb_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08006465}
6466
David Decotigny14ad2512011-04-27 18:32:43 +00006467int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08006468{
Alexander Duyck090b1792009-10-27 23:51:55 +00006469 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08006470 struct e1000_mac_info *mac = &adapter->hw.mac;
6471
6472 mac->autoneg = 0;
6473
David Decotigny14ad2512011-04-27 18:32:43 +00006474 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6475 * for the switch() below to work */
6476 if ((spd & 1) || (dplx & ~1))
6477 goto err_inval;
6478
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006479 /* Fiber NICs only allow 1000 Mbps Full duplex */
6480 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
David Decotigny14ad2512011-04-27 18:32:43 +00006481 (spd != SPEED_1000 ||
6482 dplx != DUPLEX_FULL))
6483 goto err_inval;
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006484
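	/* Illustrative note: SPEED_10/100/1000 are the even values 10, 100
	 * and 1000 while DUPLEX_HALF/DUPLEX_FULL are 0/1, so the sum below
	 * uniquely encodes each speed/duplex pair
	 * (e.g. 100 + DUPLEX_FULL == 101). */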
David Decotigny14ad2512011-04-27 18:32:43 +00006485 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006486 case SPEED_10 + DUPLEX_HALF:
6487 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6488 break;
6489 case SPEED_10 + DUPLEX_FULL:
6490 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6491 break;
6492 case SPEED_100 + DUPLEX_HALF:
6493 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6494 break;
6495 case SPEED_100 + DUPLEX_FULL:
6496 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6497 break;
6498 case SPEED_1000 + DUPLEX_FULL:
6499 mac->autoneg = 1;
6500 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6501 break;
6502 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6503 default:
David Decotigny14ad2512011-04-27 18:32:43 +00006504 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08006505 }
6506 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00006507
6508err_inval:
6509 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6510 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08006511}
6512
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006513static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
Auke Kok9d5c8242008-01-24 02:22:38 -08006514{
6515 struct net_device *netdev = pci_get_drvdata(pdev);
6516 struct igb_adapter *adapter = netdev_priv(netdev);
6517 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07006518 u32 ctrl, rctl, status;
Auke Kok9d5c8242008-01-24 02:22:38 -08006519 u32 wufc = adapter->wol;
6520#ifdef CONFIG_PM
6521 int retval = 0;
6522#endif
6523
6524 netif_device_detach(netdev);
6525
Alexander Duycka88f10e2008-07-08 15:13:38 -07006526 if (netif_running(netdev))
6527 igb_close(netdev);
6528
Alexander Duyck047e0032009-10-27 15:49:27 +00006529 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006530
6531#ifdef CONFIG_PM
6532 retval = pci_save_state(pdev);
6533 if (retval)
6534 return retval;
6535#endif
6536
6537 status = rd32(E1000_STATUS);
6538 if (status & E1000_STATUS_LU)
6539 wufc &= ~E1000_WUFC_LNKC;
6540
6541 if (wufc) {
6542 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006543 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006544
6545 /* turn on all-multi mode if wake on multicast is enabled */
6546 if (wufc & E1000_WUFC_MC) {
6547 rctl = rd32(E1000_RCTL);
6548 rctl |= E1000_RCTL_MPE;
6549 wr32(E1000_RCTL, rctl);
6550 }
6551
6552 ctrl = rd32(E1000_CTRL);
6553 /* advertise wake from D3Cold */
6554 #define E1000_CTRL_ADVD3WUC 0x00100000
6555 /* phy power management enable */
6556 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6557 ctrl |= E1000_CTRL_ADVD3WUC;
6558 wr32(E1000_CTRL, ctrl);
6559
Auke Kok9d5c8242008-01-24 02:22:38 -08006560 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00006561 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08006562
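		/* Illustrative note: WUC and WUFC are the Wake Up Control
		 * and Wake Up Filter Control registers; setting PME_EN arms
		 * PME assertion when a programmed wake event fires. */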
6563 wr32(E1000_WUC, E1000_WUC_PME_EN);
6564 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08006565 } else {
6566 wr32(E1000_WUC, 0);
6567 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08006568 }
6569
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006570 *enable_wake = wufc || adapter->en_mng_pt;
6571 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00006572 igb_power_down_link(adapter);
6573 else
6574 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006575
6576 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6577 * would have already happened in close and is redundant. */
6578 igb_release_hw_control(adapter);
6579
6580 pci_disable_device(pdev);
6581
Auke Kok9d5c8242008-01-24 02:22:38 -08006582 return 0;
6583}
6584
6585#ifdef CONFIG_PM
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006586static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
6587{
6588 int retval;
6589 bool wake;
6590
6591 retval = __igb_shutdown(pdev, &wake);
6592 if (retval)
6593 return retval;
6594
6595 if (wake) {
6596 pci_prepare_to_sleep(pdev);
6597 } else {
6598 pci_wake_from_d3(pdev, false);
6599 pci_set_power_state(pdev, PCI_D3hot);
6600 }
6601
6602 return 0;
6603}
6604
Auke Kok9d5c8242008-01-24 02:22:38 -08006605static int igb_resume(struct pci_dev *pdev)
6606{
6607 struct net_device *netdev = pci_get_drvdata(pdev);
6608 struct igb_adapter *adapter = netdev_priv(netdev);
6609 struct e1000_hw *hw = &adapter->hw;
6610 u32 err;
6611
6612 pci_set_power_state(pdev, PCI_D0);
6613 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006614 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006615
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006616 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006617 if (err) {
6618 dev_err(&pdev->dev,
6619 "igb: Cannot enable PCI device from suspend\n");
6620 return err;
6621 }
6622 pci_set_master(pdev);
6623
6624 pci_enable_wake(pdev, PCI_D3hot, 0);
6625 pci_enable_wake(pdev, PCI_D3cold, 0);
6626
Alexander Duyck047e0032009-10-27 15:49:27 +00006627 if (igb_init_interrupt_scheme(adapter)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07006628 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6629 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08006630 }
6631
Auke Kok9d5c8242008-01-24 02:22:38 -08006632 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00006633
6634 /* let the f/w know that the h/w is now under the control of the
6635 * driver. */
6636 igb_get_hw_control(adapter);
6637
Auke Kok9d5c8242008-01-24 02:22:38 -08006638 wr32(E1000_WUS, ~0);
6639
Alexander Duycka88f10e2008-07-08 15:13:38 -07006640 if (netif_running(netdev)) {
6641 err = igb_open(netdev);
6642 if (err)
6643 return err;
6644 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006645
6646 netif_device_attach(netdev);
6647
Auke Kok9d5c8242008-01-24 02:22:38 -08006648 return 0;
6649}
6650#endif
6651
6652static void igb_shutdown(struct pci_dev *pdev)
6653{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006654 bool wake;
6655
6656 __igb_shutdown(pdev, &wake);
6657
6658 if (system_state == SYSTEM_POWER_OFF) {
6659 pci_wake_from_d3(pdev, wake);
6660 pci_set_power_state(pdev, PCI_D3hot);
6661 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006662}
6663
6664#ifdef CONFIG_NET_POLL_CONTROLLER
6665/*
6666 * Polling 'interrupt' - used by things like netconsole to send skbs
6667 * without having to re-enable interrupts. It's not called while
6668 * the interrupt routine is executing.
6669 */
6670static void igb_netpoll(struct net_device *netdev)
6671{
6672 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006673 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08006674 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006675
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006676 if (!adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00006677 struct igb_q_vector *q_vector = adapter->q_vector[0];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006678 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00006679 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006680 return;
6681 }
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07006682
Alexander Duyck047e0032009-10-27 15:49:27 +00006683 for (i = 0; i < adapter->num_q_vectors; i++) {
6684 struct igb_q_vector *q_vector = adapter->q_vector[i];
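		/* Illustrative note: writing a vector's bit to EIMC (Extended
		 * Interrupt Mask Clear) masks that MSI-X interrupt before its
		 * poll is scheduled. */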
6685 wr32(E1000_EIMC, q_vector->eims_value);
6686 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006687 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006688}
6689#endif /* CONFIG_NET_POLL_CONTROLLER */
6690
6691/**
6692 * igb_io_error_detected - called when PCI error is detected
6693 * @pdev: Pointer to PCI device
6694 * @state: The current pci connection state
6695 *
6696 * This function is called after a PCI bus error affecting
6697 * this device has been detected.
6698 */
6699static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6700 pci_channel_state_t state)
6701{
6702 struct net_device *netdev = pci_get_drvdata(pdev);
6703 struct igb_adapter *adapter = netdev_priv(netdev);
6704
6705 netif_device_detach(netdev);
6706
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00006707 if (state == pci_channel_io_perm_failure)
6708 return PCI_ERS_RESULT_DISCONNECT;
6709
Auke Kok9d5c8242008-01-24 02:22:38 -08006710 if (netif_running(netdev))
6711 igb_down(adapter);
6712 pci_disable_device(pdev);
6713
6714 /* Request a slot reset. */
6715 return PCI_ERS_RESULT_NEED_RESET;
6716}
6717
6718/**
6719 * igb_io_slot_reset - called after the pci bus has been reset.
6720 * @pdev: Pointer to PCI device
6721 *
6722 * Restart the card from scratch, as if from a cold-boot. Implementation
6723 * resembles the first-half of the igb_resume routine.
6724 */
6725static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6726{
6727 struct net_device *netdev = pci_get_drvdata(pdev);
6728 struct igb_adapter *adapter = netdev_priv(netdev);
6729 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08006730 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006731 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006732
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006733 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006734 dev_err(&pdev->dev,
6735 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08006736 result = PCI_ERS_RESULT_DISCONNECT;
6737 } else {
6738 pci_set_master(pdev);
6739 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006740 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08006741
6742 pci_enable_wake(pdev, PCI_D3hot, 0);
6743 pci_enable_wake(pdev, PCI_D3cold, 0);
6744
6745 igb_reset(adapter);
6746 wr32(E1000_WUS, ~0);
6747 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08006748 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006749
Jeff Kirsherea943d42008-12-11 20:34:19 -08006750 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6751 if (err) {
6752 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6753 "failed 0x%0x\n", err);
6754 /* non-fatal, continue */
6755 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006756
Alexander Duyck40a914f2008-11-27 00:24:37 -08006757 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08006758}
6759
6760/**
6761 * igb_io_resume - called when traffic can start flowing again.
6762 * @pdev: Pointer to PCI device
6763 *
6764 * This callback is called when the error recovery driver tells us that
6765 * it's OK to resume normal operation. Implementation resembles the
6766 * second-half of the igb_resume routine.
6767 */
6768static void igb_io_resume(struct pci_dev *pdev)
6769{
6770 struct net_device *netdev = pci_get_drvdata(pdev);
6771 struct igb_adapter *adapter = netdev_priv(netdev);
6772
Auke Kok9d5c8242008-01-24 02:22:38 -08006773 if (netif_running(netdev)) {
6774 if (igb_up(adapter)) {
6775 dev_err(&pdev->dev, "igb_up failed after reset\n");
6776 return;
6777 }
6778 }
6779
6780 netif_device_attach(netdev);
6781
6782 /* let the f/w know that the h/w is now under the control of the
6783 * driver. */
6784 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006785}
6786
Alexander Duyck26ad9172009-10-05 06:32:49 +00006787static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6788 u8 qsel)
6789{
6790 u32 rar_low, rar_high;
6791 struct e1000_hw *hw = &adapter->hw;
6792
6793 /* HW expects these in little endian so we reverse the byte order
6794 * from network order (big endian) to little endian
6795 */
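	/* Worked example (illustrative): for the MAC address
	 * 00:1b:21:aa:bb:cc the packing below yields
	 * rar_low = 0xaa211b00 (addr[3]..addr[0]) and
	 * rar_high = 0x0000ccbb (addr[5]..addr[4], before the AV/pool
	 * bits are OR'd in). */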
6796 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6797 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6798 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6799
6800 /* Indicate to hardware the Address is Valid. */
6801 rar_high |= E1000_RAH_AV;
6802
6803 if (hw->mac.type == e1000_82575)
6804 rar_high |= E1000_RAH_POOL_1 * qsel;
6805 else
6806 rar_high |= E1000_RAH_POOL_1 << qsel;
6807
6808 wr32(E1000_RAL(index), rar_low);
6809 wrfl();
6810 wr32(E1000_RAH(index), rar_high);
6811 wrfl();
6812}
6813
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006814static int igb_set_vf_mac(struct igb_adapter *adapter,
6815 int vf, unsigned char *mac_addr)
6816{
6817 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006818 /* VF MAC addresses start at the end of the receive address array and
6819 * move towards the first; as a result a collision should not be possible */
6820 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006821
Alexander Duyck37680112009-02-19 20:40:30 -08006822 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006823
Alexander Duyck26ad9172009-10-05 06:32:49 +00006824 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006825
6826 return 0;
6827}
6828
Williams, Mitch A8151d292010-02-10 01:44:24 +00006829static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6830{
6831 struct igb_adapter *adapter = netdev_priv(netdev);
6832 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6833 return -EINVAL;
6834 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6835 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6836 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6837 " change effective.\n");
6838 if (test_bit(__IGB_DOWN, &adapter->state)) {
6839 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6840 " but the PF device is not up.\n");
6841 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6842 " attempting to use the VF device.\n");
6843 }
6844 return igb_set_vf_mac(adapter, vf, mac);
6845}
6846
Lior Levy17dc5662011-02-08 02:28:46 +00006847static int igb_link_mbps(int internal_link_speed)
6848{
6849 switch (internal_link_speed) {
6850 case SPEED_100:
6851 return 100;
6852 case SPEED_1000:
6853 return 1000;
6854 default:
6855 return 0;
6856 }
6857}
6858
6859static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6860 int link_speed)
6861{
6862 int rf_dec, rf_int;
6863 u32 bcnrc_val;
6864
6865 if (tx_rate != 0) {
6866 /* Calculate the rate factor values to set */
6867 rf_int = link_speed / tx_rate;
6868 rf_dec = (link_speed - (rf_int * tx_rate));
6869 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
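		/* Worked example (illustrative): with link_speed = 1000 and
		 * tx_rate = 300, rf_int = 3 and the remaining 100 Mbps scales
		 * to rf_dec = (100 << E1000_RTTBCNRC_RF_INT_SHIFT) / 300, so
		 * the programmed rate factor approximates 1000/300 = 3.33. */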
6870
6871 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6872 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6873 E1000_RTTBCNRC_RF_INT_MASK);
6874 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6875 } else {
6876 bcnrc_val = 0;
6877 }
6878
6879 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6880 wr32(E1000_RTTBCNRC, bcnrc_val);
6881}
6882
6883static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6884{
6885 int actual_link_speed, i;
6886 bool reset_rate = false;
6887
6888 /* VF TX rate limit was not set or not supported */
6889 if ((adapter->vf_rate_link_speed == 0) ||
6890 (adapter->hw.mac.type != e1000_82576))
6891 return;
6892
6893 actual_link_speed = igb_link_mbps(adapter->link_speed);
6894 if (actual_link_speed != adapter->vf_rate_link_speed) {
6895 reset_rate = true;
6896 adapter->vf_rate_link_speed = 0;
6897 dev_info(&adapter->pdev->dev,
6898 "Link speed has been changed. VF Transmit "
6899 "rate is disabled\n");
6900 }
6901
6902 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6903 if (reset_rate)
6904 adapter->vf_data[i].tx_rate = 0;
6905
6906 igb_set_vf_rate_limit(&adapter->hw, i,
6907 adapter->vf_data[i].tx_rate,
6908 actual_link_speed);
6909 }
6910}
6911
Williams, Mitch A8151d292010-02-10 01:44:24 +00006912static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6913{
Lior Levy17dc5662011-02-08 02:28:46 +00006914 struct igb_adapter *adapter = netdev_priv(netdev);
6915 struct e1000_hw *hw = &adapter->hw;
6916 int actual_link_speed;
6917
6918 if (hw->mac.type != e1000_82576)
6919 return -EOPNOTSUPP;
6920
6921 actual_link_speed = igb_link_mbps(adapter->link_speed);
6922 if ((vf >= adapter->vfs_allocated_count) ||
6923 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6924 (tx_rate < 0) || (tx_rate > actual_link_speed))
6925 return -EINVAL;
6926
6927 adapter->vf_rate_link_speed = actual_link_speed;
6928 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6929 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6930
6931 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00006932}
6933
6934static int igb_ndo_get_vf_config(struct net_device *netdev,
6935 int vf, struct ifla_vf_info *ivi)
6936{
6937 struct igb_adapter *adapter = netdev_priv(netdev);
6938 if (vf >= adapter->vfs_allocated_count)
6939 return -EINVAL;
6940 ivi->vf = vf;
6941 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00006942 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00006943 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6944 ivi->qos = adapter->vf_data[vf].pf_qos;
6945 return 0;
6946}
6947
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006948static void igb_vmm_control(struct igb_adapter *adapter)
6949{
6950 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00006951 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006952
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006953 switch (hw->mac.type) {
6954 case e1000_82575:
6955 default:
6956 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006957 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006958 case e1000_82576:
6959 /* notify HW that the MAC is adding vlan tags */
6960 reg = rd32(E1000_DTXCTL);
6961 reg |= E1000_DTXCTL_VLAN_ADDED;
6962 wr32(E1000_DTXCTL, reg);
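		/* fall through */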
6963 case e1000_82580:
6964 /* enable replication vlan tag stripping */
6965 reg = rd32(E1000_RPLOLR);
6966 reg |= E1000_RPLOLR_STRVLAN;
6967 wr32(E1000_RPLOLR, reg);
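		/* fall through */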
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00006968 case e1000_i350:
6969 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006970 break;
6971 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00006972
Alexander Duyckd4960302009-10-27 15:53:45 +00006973 if (adapter->vfs_allocated_count) {
6974 igb_vmdq_set_loopback_pf(hw, true);
6975 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00006976 igb_vmdq_set_anti_spoofing_pf(hw, true,
6977 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00006978 } else {
6979 igb_vmdq_set_loopback_pf(hw, false);
6980 igb_vmdq_set_replication_pf(hw, false);
6981 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006982}
6983
Auke Kok9d5c8242008-01-24 02:22:38 -08006984/* igb_main.c */