/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 2
#define BUILD 10
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_find_enabled_vfs(struct igb_adapter *adapter);
static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

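/*
 * Hypothetical sample of the output produced above for a queue-array
 * register (the values are illustrative only, not from real hardware):
 *
 *   RDLEN[0-3]      00001000 00001000 00000000 00000000
 */
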
/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63      46 45   40 39 38 36 35 32 31  24            15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type >= e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

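/*
 * Illustration of the assembly above: the returned counter is
 *
 *   stamp = (SYSTIMH << (shift + 32)) | (SYSTIML << shift) | (SYSTIMR >> 8)
 *
 * with shift == 0 (and no SYSTIMR read) on pre-82580 parts; on 82580 and
 * newer the SYSTIMR term reads as zero since TIMINCA is left unadjusted.
 */
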
/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

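/*
 * Worked example (for illustration) of the 82576 queue interleave
 * implemented by Q_IDX_82576() below:
 *
 *   i              0  1  2  3  4  5  6  7
 *   Q_IDX_82576(i) 0  8  1  9  2 10  3 11
 *
 * so consecutive PF queue indices alternate between the low (0-7) and
 * high (8-15) queue groups, matching the 0/8, 1/9, ... pairs handed to
 * the VFs.
 */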
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, loopback VLAN packets have the tag byte-swapped. */
		if (adapter->hw.mac.type == e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

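/*
 * Example call (illustrative): igb_write_ivar(hw, 5, 2, 8) replaces byte 1
 * of IVAR register 2 with (5 | E1000_IVAR_VALID), i.e. MSI-X vector 5 with
 * the valid bit set, leaving the other three cause-allocation bytes in
 * that register untouched.
 */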
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

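/*
 * Illustrative index/offset arithmetic from igb_assign_vector(), for
 * rx_queue = 10:
 *
 *   82576 (column-major): index = 10 & 0x7 = 2, offset = (10 & 0x8) << 1 = 16
 *   82580+ (row-major):   index = 10 >> 1  = 5, offset = (10 & 0x1) << 4 = 0
 *
 * The corresponding Tx cause lands a further 8 bits up, in the adjacent
 * byte of the same IVAR entry.
 */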
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

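/*
 * The teardown above is the reverse of igb_init_interrupt_scheme(): rings
 * are freed before their q_vectors, and the MSI-X/MSI allocation itself
 * is released last.
 */
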
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

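/*
 * Vector budget example for the function above: with rss_queues = 4, no
 * VFs and IGB_FLAG_QUEUE_PAIRS clear, numvecs = 4 Rx + 4 Tx + 1 link = 9
 * MSI-X vectors; with queue pairs enabled each Rx/Tx pair shares a vector
 * and the request drops to 4 + 1 = 5.
 */
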
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

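/*
 * Mapping example: 4 Rx and 4 Tx rings on 4 q_vectors (queue pairs) give
 * vector i both rx_ring[i] and tx_ring[i]; with 8 q_vectors available,
 * each ring gets its own vector instead (Rx on vectors 0-3, Tx on 4-7).
 */
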
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

1325static void igb_free_irq(struct igb_adapter *adapter)
1326{
Auke Kok9d5c8242008-01-24 02:22:38 -08001327 if (adapter->msix_entries) {
1328 int vector = 0, i;
1329
Alexander Duyck047e0032009-10-27 15:49:27 +00001330 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001331
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001332 for (i = 0; i < adapter->num_q_vectors; i++)
Alexander Duyck047e0032009-10-27 15:49:27 +00001333 free_irq(adapter->msix_entries[vector++].vector,
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00001334 adapter->q_vector[i]);
Alexander Duyck047e0032009-10-27 15:49:27 +00001335 } else {
1336 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001337 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001338}
1339
1340/**
1341 * igb_irq_disable - Mask off interrupt generation on the NIC
1342 * @adapter: board private structure
1343 **/
1344static void igb_irq_disable(struct igb_adapter *adapter)
1345{
1346 struct e1000_hw *hw = &adapter->hw;
1347
Alexander Duyck25568a52009-10-27 23:49:59 +00001348 /*
1349 * we need to be careful when disabling interrupts. The VFs are also
1350 * mapped into these registers and so clearing the bits can cause
1351 * issues on the VF drivers so we only need to clear what we set
1352 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001353 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001354 u32 regval = rd32(E1000_EIAM);
1355 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1356 wr32(E1000_EIMC, adapter->eims_enable_mask);
1357 regval = rd32(E1000_EIAC);
1358 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001359 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001360
1361 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001362 wr32(E1000_IMC, ~0);
1363 wrfl();
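	/* wait for any interrupt handlers already running to complete */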
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

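	/*
	 * The manageability firmware's DHCP cookie names a VLAN that has to
	 * stay in the VLAN filter table; only drop the previous VID when no
	 * user-configured VLAN still claims it.
	 */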
	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

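	/*
	 * Teardown order matters: flag the adapter down, stop Rx so no new
	 * frames arrive, quiesce Tx, then disable NAPI and interrupts
	 * before the timers and stats are dealt with.
	 */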
	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
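		/* e.g. a 9000-byte MTU gives max_frame_size = 9022, so
		 * min_tx_space = (9022 + 16 - 4) * 2 = 18068 bytes, which
		 * rounds up to 18 KB of Tx FIFO */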
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
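	/* e.g. the 82575's default 34 KB PBA with a 1522-byte max frame
	 * gives hwm = min(31334, 31772) = 31334, masked down to 31328 */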
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	igb_init_dmac(adapter, pba);
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static netdev_features_t igb_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int igb_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	return 0;
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
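	/* prefer 64-bit DMA, falling back to a 32-bit mask if the host
	 * cannot provide it */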
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	/*
	 * features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_IPV6_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		   "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (!igb_check_vf_assignment(adapter)) {
			pci_disable_sriov(pdev);
			msleep(500);
		} else {
			dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
		}

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
2226 * the memory for the VFs.
2227 **/
2228static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2229{
2230#ifdef CONFIG_PCI_IOV
2231 struct pci_dev *pdev = adapter->pdev;
Greg Rose0224d662011-10-14 02:57:14 +00002232 int old_vfs = igb_find_enabled_vfs(adapter);
2233 int i;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002234
Greg Rose0224d662011-10-14 02:57:14 +00002235 if (old_vfs) {
2236 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2237 "max_vfs setting of %d\n", old_vfs, max_vfs);
2238 adapter->vfs_allocated_count = old_vfs;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002239 }
2240
Greg Rose0224d662011-10-14 02:57:14 +00002241 if (!adapter->vfs_allocated_count)
2242 return;
2243
2244 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2245 sizeof(struct vf_data_storage), GFP_KERNEL);
2246 /* if allocation failed then we do not support SR-IOV */
2247 if (!adapter->vf_data) {
Alexander Duycka6b623e2009-10-27 23:47:53 +00002248 adapter->vfs_allocated_count = 0;
Greg Rose0224d662011-10-14 02:57:14 +00002249 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2250 "Data Storage\n");
2251 goto out;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002252 }
Greg Rose0224d662011-10-14 02:57:14 +00002253
2254 if (!old_vfs) {
2255 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2256 goto err_out;
2257 }
2258 dev_info(&pdev->dev, "%d VFs allocated\n",
2259 adapter->vfs_allocated_count);
2260 for (i = 0; i < adapter->vfs_allocated_count; i++)
2261 igb_vf_configure(adapter, i);
2262
2263 /* DMA Coalescing is not supported in IOV mode. */
2264 adapter->flags &= ~IGB_FLAG_DMAC;
2265 goto out;
2266err_out:
2267 kfree(adapter->vf_data);
2268 adapter->vf_data = NULL;
2269 adapter->vfs_allocated_count = 0;
2270out:
2271 return;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002272#endif /* CONFIG_PCI_IOV */
2273}
2274
Alexander Duyck115f4592009-11-12 18:37:00 +00002275/**
2276 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2277 * @adapter: board private structure to initialize
2278 *
2279 * igb_init_hw_timer initializes the function pointer and values for the hw
2280 * timer found in hardware.
2281 **/
2282static void igb_init_hw_timer(struct igb_adapter *adapter)
2283{
2284 struct e1000_hw *hw = &adapter->hw;
2285
2286 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002287 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002288 case e1000_82580:
2289 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2290 adapter->cycles.read = igb_read_clock;
2291 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2292 adapter->cycles.mult = 1;
2293 /*
2294 * The 82580 timesync updates the system timer every 8ns by 8ns
2295 * and the value cannot be shifted. Instead we need to shift
2296 * the registers to generate a 64bit timer value. As a result
2297 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2298 * 24 in order to generate a larger value for synchronization.
2299 */
2300 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2301 /* disable system timer temporarily by setting bit 31 */
2302 wr32(E1000_TSAUXC, 0x80000000);
2303 wrfl();
2304
2305 /* Set registers so that rollover occurs soon to test this. */
2306 wr32(E1000_SYSTIMR, 0x00000000);
2307 wr32(E1000_SYSTIML, 0x80000000);
2308 wr32(E1000_SYSTIMH, 0x000000FF);
2309 wrfl();
2310
2311 /* enable system timer by clearing bit 31 */
2312 wr32(E1000_TSAUXC, 0x0);
2313 wrfl();
2314
2315 timecounter_init(&adapter->clock,
2316 &adapter->cycles,
2317 ktime_to_ns(ktime_get_real()));
2318 /*
2319 * Synchronize our NIC clock against system wall clock. NIC
2320 * time stamp reading requires ~3us per sample, each sample
2321 * was pretty stable even under load => only require 10
2322 * samples for each offset comparison.
2323 */
2324 memset(&adapter->compare, 0, sizeof(adapter->compare));
2325 adapter->compare.source = &adapter->clock;
2326 adapter->compare.target = ktime_get_real;
2327 adapter->compare.num_samples = 10;
2328 timecompare_update(&adapter->compare, 0);
2329 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00002330 case e1000_82576:
2331 /*
2332 * Initialize hardware timer: we keep it running just in case
2333 * that some program needs it later on.
2334 */
2335 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2336 adapter->cycles.read = igb_read_clock;
2337 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2338 adapter->cycles.mult = 1;
2339 /**
2340 * Scale the NIC clock cycle by a large factor so that
2341 * relatively small clock corrections can be added or
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002342 * subtracted at each clock tick. The drawbacks of a large
Alexander Duyck115f4592009-11-12 18:37:00 +00002343 * factor are a) that the clock register overflows more quickly
2344 * (not such a big deal) and b) that the increment per tick has
2345 * to fit into 24 bits. As a result we need to use a shift of
2346 * 19 so we can fit a value of 16 into the TIMINCA register.
2347 */
2348 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2349 wr32(E1000_TIMINCA,
2350 (1 << E1000_TIMINCA_16NS_SHIFT) |
2351 (16 << IGB_82576_TSYNC_SHIFT));
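		/* SYSTIM thus advances by 16 << 19 every 16 ns, i.e. 2^19
		 * counts per nanosecond, so with mult == 1 and shift == 19
		 * the timecounter's (cycles * mult) >> shift is nanoseconds */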

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}

}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->node = -1;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
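	/* pairing puts the Tx and Rx ring with the same index on one
	 * vector, so e.g. 8 Rx + 8 Tx rings need 8 vectors instead of 16 */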
2443
Carolyn Wyborny1128c752011-10-14 00:13:49 +00002444 /* Setup and initialize a copy of the hw vlan table array */
2445 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2446 E1000_VLAN_FILTER_TBL_SIZE,
2447 GFP_ATOMIC);
2448
Alexander Duycka6b623e2009-10-27 23:47:53 +00002449 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002450 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002451 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2452 return -ENOMEM;
2453 }
2454
Alexander Duycka6b623e2009-10-27 23:47:53 +00002455 igb_probe_vfs(adapter);
2456
Auke Kok9d5c8242008-01-24 02:22:38 -08002457 /* Explicitly disable IRQ since the NIC can be in any state. */
2458 igb_irq_disable(adapter);
2459
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002460 if (hw->mac.type == e1000_i350)
2461 adapter->flags &= ~IGB_FLAG_DMAC;
2462
Auke Kok9d5c8242008-01-24 02:22:38 -08002463 set_bit(__IGB_DOWN, &adapter->state);
2464 return 0;
2465}
2466
2467/**
2468 * igb_open - Called when a network interface is made active
2469 * @netdev: network interface device structure
2470 *
2471 * Returns 0 on success, negative value on failure
2472 *
2473 * The open entry point is called when a network interface is made
2474 * active by the system (IFF_UP). At this point all resources needed
2475 * for transmit and receive operations are allocated, the interrupt
2476 * handler is registered with the OS, the watchdog timer is started,
2477 * and the stack is notified that the interface is ready.
2478 **/
2479static int igb_open(struct net_device *netdev)
2480{
2481 struct igb_adapter *adapter = netdev_priv(netdev);
2482 struct e1000_hw *hw = &adapter->hw;
2483 int err;
2484 int i;
2485
2486 /* disallow open during test */
2487 if (test_bit(__IGB_TESTING, &adapter->state))
2488 return -EBUSY;
2489
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002490 netif_carrier_off(netdev);
2491
Auke Kok9d5c8242008-01-24 02:22:38 -08002492 /* allocate transmit descriptors */
2493 err = igb_setup_all_tx_resources(adapter);
2494 if (err)
2495 goto err_setup_tx;
2496
2497 /* allocate receive descriptors */
2498 err = igb_setup_all_rx_resources(adapter);
2499 if (err)
2500 goto err_setup_rx;
2501
Nick Nunley88a268c2010-02-17 01:01:59 +00002502 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002503
Auke Kok9d5c8242008-01-24 02:22:38 -08002504 /* before we allocate an interrupt, we must be ready to handle it.
2505 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2506 * as soon as we call pci_request_irq, so we have to setup our
2507 * clean_rx handler before we do so. */
2508 igb_configure(adapter);
2509
2510 err = igb_request_irq(adapter);
2511 if (err)
2512 goto err_req_irq;
2513
2514 /* From here on the code is the same as igb_up() */
2515 clear_bit(__IGB_DOWN, &adapter->state);
2516
Alexander Duyck0d1ae7f2011-08-26 07:46:34 +00002517 for (i = 0; i < adapter->num_q_vectors; i++)
2518 napi_enable(&(adapter->q_vector[i]->napi));
Auke Kok9d5c8242008-01-24 02:22:38 -08002519
2520 /* Clear any pending interrupts. */
2521 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002522
2523 igb_irq_enable(adapter);
2524
Alexander Duyckd4960302009-10-27 15:53:45 +00002525 /* notify VFs that reset has been completed */
2526 if (adapter->vfs_allocated_count) {
2527 u32 reg_data = rd32(E1000_CTRL_EXT);
2528 reg_data |= E1000_CTRL_EXT_PFRSTD;
2529 wr32(E1000_CTRL_EXT, reg_data);
2530 }
2531
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002532 netif_tx_start_all_queues(netdev);
2533
Alexander Duyck25568a52009-10-27 23:49:59 +00002534 /* start the watchdog. */
2535 hw->mac.get_link_status = 1;
2536 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002537
2538 return 0;
2539
2540err_req_irq:
2541 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002542 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002543 igb_free_all_rx_resources(adapter);
2544err_setup_rx:
2545 igb_free_all_tx_resources(adapter);
2546err_setup_tx:
2547 igb_reset(adapter);
2548
2549 return err;
2550}
2551
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0; close is not allowed to fail.
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to set up
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size;

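	/*
	 * The allocations below try the ring's preferred NUMA node first
	 * (vzalloc_node, and set_dev_node around dma_alloc_coherent) and
	 * fall back to any node on failure, so descriptor memory lands
	 * near the CPU that services this queue whenever possible.
	 */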
	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, tx_ring->numa_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev,
						   tx_ring->size,
						   &tx_ring->dma,
						   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

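	/*
	 * TXDCTL carries three descriptor thresholds in separate bit
	 * fields: PTHRESH at bit 0, HTHRESH at bit 8 and WTHRESH at
	 * bit 16, hence the shifts below.
	 */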
	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to set up
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int size, desc_len;

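	/*
	 * Same node-local-first allocation strategy as in
	 * igb_setup_tx_resources(): try the ring's NUMA node, then fall
	 * back to any node rather than failing the ring setup.
	 */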
	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, rx_ring->numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev,
						   rx_ring->size,
						   &rx_ring->dma,
						   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8 bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

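	/*
	 * The 40-byte RSS key is programmed into ten 32-bit RSSRK
	 * registers. Each register takes four consecutive key bytes in
	 * little-endian order, e.g. RSSRK(0) = 0xda565a6d from the first
	 * four bytes above.
	 */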
	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

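	/*
	 * The loop below fills the 128-entry redirection table four
	 * entries (one 32-bit RETA register) at a time, spreading hash
	 * values round-robin across the active queues. With
	 * num_rx_queues == 2 and shift == 3 (the 82576 VMDq case), for
	 * example, the entries alternate 0x00, 0x08, 0x00, 0x08, ...,
	 * steering alternate hashes to the two queues of the PF's pool.
	 */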
	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer, so on older
	 * devices we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

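	/*
	 * SRRCTL describes the receive buffer layout: the header buffer
	 * size goes in the BSIZEHDRSIZE field, the packet buffer size in
	 * BSIZEPKT, and DESCTYPE selects always-split header/payload
	 * buffers. The packet buffer is half a page so two ring entries
	 * can share one page, capped at 16KB where PAGE_SIZE exceeds
	 * 32KB.
	 */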
	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Set up the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
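	/*
	 * A buffer that owns an skb was mapped with dma_map_single()
	 * (the linear part of the frame); a buffer with a DMA handle but
	 * no skb holds a mapped fragment page instead, so each case must
	 * be released with the matching unmap helper.
	 */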
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	} else if (tx_buffer->dma) {
		dma_unmap_page(ring->dev,
			       tx_buffer->dma,
			       tx_buffer->length,
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	tx_buffer->dma = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *	    0 on no addresses written
 *	    X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

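	/*
	 * Note the "* 6" below: each entry in mta_list is a raw 6-byte
	 * (ETH_ALEN) Ethernet address, packed back to back with no
	 * padding.
	 */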
	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *	    0 on no addresses written
 *	    X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
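	/*
	 * RAR entry 0 holds the PF default MAC and one entry is reserved
	 * per allocated VF, so only rar_entry_count - (vfn + 1) slots
	 * remain available for extra unicast filters.
	 */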
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA; if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers; if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

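/*
 * WVBR flags spoofed packets per VF in two banks of bits: bit j for
 * VF j's first queue and bit (j + IGB_STAGGERED_QUEUE_OFFSET) for its
 * second, staggered queue. igb_spoof_check() below warns once per VF
 * and clears both bits.
 */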
#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

/*
 * Need to wait a few seconds after link up to get diagnostic information
 * from the phy
 */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
				printk(KERN_INFO "igb: %s The network adapter "
				       "link speed was downshifted "
				       "because it overheated.\n",
				       netdev->name);
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
				printk(KERN_ERR "igb: %s The network adapter "
				       "was stopped because it "
				       "overheated.\n",
				       netdev->name);
			}

			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec (ITR value of IGB_4K_ITR).
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

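	/*
	 * Worked example: a queue averaging 1000-byte frames gives
	 * avg_wire_size = 1024 after the 24-byte CRC/preamble/gap pad
	 * below; that falls in the mid-size boost range, so
	 * new_val = 1024 / 3 = 341, i.e. roughly 11700 ints/sec if one
	 * assumes the 0.25 usec ITR granularity implied by IGB_20K_ITR.
	 * Smaller frames produce a smaller value and thus a higher
	 * interrupt rate, subject to the conservative-mode clamp.
	 */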
	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

3834/**
3835 * igb_update_itr - update the dynamic ITR value based on statistics
3836 * Stores a new ITR value based on packets and byte
3837 * counts during the last interrupt. The advantage of per interrupt
3838 * computation is faster updates and more accurate ITR for the current
3839 * traffic pattern. Constants in this function were computed
3840 * based on theoretical maximum wire speed and thresholds were set based
3841 * on testing data as well as attempting to minimize response time
3842 * while increasing bulk throughput.
3843 * this functionality is controlled by the InterruptThrottleRate module
3844 * parameter (see igb_param.c)
3845 * NOTE: These calculations are only valid when operating in a single-
3846 * queue environment.
Alexander Duyck0ba82992011-08-26 07:45:47 +00003847 * @q_vector: pointer to q_vector
3848 * @ring_container: ring info to update the itr for
Auke Kok9d5c8242008-01-24 02:22:38 -08003849 **/
Alexander Duyck0ba82992011-08-26 07:45:47 +00003850static void igb_update_itr(struct igb_q_vector *q_vector,
3851 struct igb_ring_container *ring_container)
Auke Kok9d5c8242008-01-24 02:22:38 -08003852{
Alexander Duyck0ba82992011-08-26 07:45:47 +00003853 unsigned int packets = ring_container->total_packets;
3854 unsigned int bytes = ring_container->total_bytes;
3855 u8 itrval = ring_container->itr;
Auke Kok9d5c8242008-01-24 02:22:38 -08003856
Alexander Duyck0ba82992011-08-26 07:45:47 +00003857 /* no packets, exit with status unchanged */
Auke Kok9d5c8242008-01-24 02:22:38 -08003858 if (packets == 0)
Alexander Duyck0ba82992011-08-26 07:45:47 +00003859 return;
Auke Kok9d5c8242008-01-24 02:22:38 -08003860
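	/*
	 * Rough intent of the thresholds below: a small number of small
	 * packets favours latency (step towards lowest_latency), while
	 * large averages (> 8000 bytes/packet, i.e. TSO or jumbo traffic)
	 * favour throughput (step towards bulk_latency).
	 */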
	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				itrval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				itrval = bulk_latency;
			} else if ((packets > 35)) {
				itrval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) :
			  new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

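/*
 * Write an advanced context descriptor to the ring.  It carries the
 * offload state (VLAN tag, header lengths, checksum/TSO options) that
 * the following data descriptors for the packet refer back to.
 */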
void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
		     u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

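/*
 * Set up TCP segmentation offload for a GSO skb: clear the IP length
 * and checksum fields and seed the TCP checksum with a zero-length
 * pseudo-header so the hardware can finalize both per segment, then
 * describe the header layout via a context descriptor.  Returns 1 if
 * a context descriptor was written, 0 for non-GSO skbs, or a negative
 * errno on failure.
 */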
static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}

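/*
 * Build the cmd_type field shared by all data descriptors of a packet
 * from the per-packet tx_flags (VLAN insertion, timestamping, TSO).
 */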
static __le32 igb_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
				      E1000_ADVTXD_DCMD_IFCS |
				      E1000_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);

	/* set timestamp bit if present */
	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);

	/* set segmentation bits for TSO */
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring if any offload is enabled */
	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert IPv4 checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	}

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

/*
 * The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR	15
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
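/* e.g. a 48K fragment is written as a 32K descriptor followed by a
 * 16K descriptor by the segmenting loop in igb_tx_map() below */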

static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer_info;
	union e1000_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	__le32 cmd_type;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	first->length = size;
	first->dma = dma;
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.olinfo_status = 0;
			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = size;
		tx_buffer_info->dma = dma;

		tx_desc->read.olinfo_status = 0;
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
4296
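/*
 * Slow path of igb_maybe_stop_tx(): stop the subqueue, then re-check
 * for free descriptors under a memory barrier in case the clean-up
 * path freed some while the queue was being stopped.
 */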
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	igb_tx_map(tx_ring, first, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;

out_drop:
	igb_unmap_and_free_tx_resource(tx_ring, first);

	return NETDEV_TX_OK;
}

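/*
 * Map the skb's queue_mapping onto a Tx ring; out-of-range values
 * (e.g. from stacked devices) are wrapped with a modulo.
 */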
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

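	/*
	 * max_frame adds the 14-byte Ethernet header, 4-byte FCS, and a
	 * 4-byte VLAN tag, so e.g. the 9216-byte jumbo MTU ceiling below
	 * corresponds to a 9238-byte frame.
	 */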
	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also an indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

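/*
 * Apply the ITR value computed during the previous interrupt; the
 * write is deferred to here so it doesn't reset the adapter's internal
 * timer mid-interval.  82575 has the interval written to both halves
 * of EITR, later parts take the E1000_EITR_CNT_IGNR flag instead.
 */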
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
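/*
 * Direct Cache Access: tag each queue's descriptor traffic with the
 * CPU currently servicing it so the chipset can place writebacks in
 * that CPU's cache.  Reprogrammed whenever the servicing CPU moves.
 */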
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
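/*
 * SR-IOV virtual functions appear as PCI functions at a fixed offset
 * from the PF: devfn + 0x80 plus vf times the per-device stride (2 on
 * 82576, 4 on i350).  These helpers locate the VF pci_dev pointers so
 * assignment to guests can be tracked.
 */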
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pvfdev;
	unsigned int device_id;
	u16 thisvf_devfn;

	random_ether_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
			       (pdev->devfn & 1);
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
			       (pdev->devfn & 3);
		break;
	default:
		device_id = 0;
		thisvf_devfn = 0;
		break;
	}

	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == thisvf_devfn)
			break;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	if (pvfdev)
		adapter->vf_data[vf].vfdev = pvfdev;
	else
		dev_err(&pdev->dev,
			"Couldn't find pci dev ptr for VF %4.4x\n",
			thisvf_devfn);
	return pvfdev != NULL;
}

static int igb_find_enabled_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pvfdev;
	u16 vf_devfn = 0;
	u16 vf_stride;
	unsigned int device_id;
	int vfs_found = 0;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		device_id = IGB_82576_VF_DEV_ID;
		/* VF Stride for 82576 is 2 */
		vf_stride = 2;
		break;
	case e1000_i350:
		device_id = IGB_I350_VF_DEV_ID;
		/* VF Stride for I350 is 4 */
		vf_stride = 4;
		break;
	default:
		device_id = 0;
		vf_stride = 0;
		break;
	}

	vf_devfn = pdev->devfn + 0x80;
	pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
	while (pvfdev) {
		if (pvfdev->devfn == vf_devfn)
			vfs_found++;
		vf_devfn += vf_stride;
		pvfdev = pci_get_device(hw->vendor_id,
					device_id, pvfdev);
	}

	return vfs_found;
}

static int igb_check_vf_assignment(struct igb_adapter *adapter)
{
	int i;
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (adapter->vf_data[i].vfdev) {
			if (adapter->vf_data[i].vfdev->dev_flags &
			    PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}
	}
	return false;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

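/*
 * Add or remove a VF from the shared VLVF entry for @vid.  The first
 * user of a VID also sets it in the VFTA and the last user clears it;
 * a VF's RLPML (max receive packet size) is grown or shrunk by 4 bytes
 * as its first VLAN is added or its last one removed, to make room
 * for the tag.
 */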
5129static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5130{
5131 struct e1000_hw *hw = &adapter->hw;
5132 u32 reg, i;
5133
Alexander Duyck51466232009-10-27 23:47:35 +00005134 /* The vlvf table only exists on 82576 hardware and newer */
5135 if (hw->mac.type < e1000_82576)
5136 return -1;
5137
5138 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005139 if (!adapter->vfs_allocated_count)
5140 return -1;
5141
5142 /* Find the vlan filter for this id */
5143 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5144 reg = rd32(E1000_VLVF(i));
5145 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5146 vid == (reg & E1000_VLVF_VLANID_MASK))
5147 break;
5148 }
5149
5150 if (add) {
5151 if (i == E1000_VLVF_ARRAY_SIZE) {
5152 /* Did not find a matching VLAN ID entry that was
5153 * enabled. Search for a free filter entry, i.e.
5154 * one without the enable bit set
5155 */
5156 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5157 reg = rd32(E1000_VLVF(i));
5158 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5159 break;
5160 }
5161 }
5162 if (i < E1000_VLVF_ARRAY_SIZE) {
5163 /* Found an enabled/available entry */
5164 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5165
5166 /* if !enabled we need to set this up in vfta */
5167 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00005168 /* add VID to filter table */
5169 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005170 reg |= E1000_VLVF_VLANID_ENABLE;
5171 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00005172 reg &= ~E1000_VLVF_VLANID_MASK;
5173 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005174 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005175
5176 /* do not modify RLPML for PF devices */
5177 if (vf >= adapter->vfs_allocated_count)
5178 return 0;
5179
5180 if (!adapter->vf_data[vf].vlans_enabled) {
5181 u32 size;
5182 reg = rd32(E1000_VMOLR(vf));
5183 size = reg & E1000_VMOLR_RLPML_MASK;
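				/* grow the VF's max frame size by the
				 * 4 bytes of an 802.1Q VLAN tag
				 */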
5184 size += 4;
5185 reg &= ~E1000_VMOLR_RLPML_MASK;
5186 reg |= size;
5187 wr32(E1000_VMOLR(vf), reg);
5188 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00005189
Alexander Duyck51466232009-10-27 23:47:35 +00005190 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005191 }
5192 } else {
5193 if (i < E1000_VLVF_ARRAY_SIZE) {
5194 /* remove vf from the pool */
5195 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5196 /* if pool is empty then remove entry from vfta */
5197 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5198 reg = 0;
5199 igb_vfta_set(hw, vid, false);
5200 }
5201 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00005202
5203 /* do not modify RLPML for PF devices */
5204 if (vf >= adapter->vfs_allocated_count)
5205 return 0;
5206
5207 adapter->vf_data[vf].vlans_enabled--;
5208 if (!adapter->vf_data[vf].vlans_enabled) {
5209 u32 size;
5210 reg = rd32(E1000_VMOLR(vf));
5211 size = reg & E1000_VMOLR_RLPML_MASK;
5212 size -= 4;
5213 reg &= ~E1000_VMOLR_RLPML_MASK;
5214 reg |= size;
5215 wr32(E1000_VMOLR(vf), reg);
5216 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005217 }
5218 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00005219 return 0;
5220}
5221
5222static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5223{
5224 struct e1000_hw *hw = &adapter->hw;
5225
5226 if (vid)
5227 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5228 else
5229 wr32(E1000_VMVIR(vf), 0);
5230}
5231
5232static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5233 int vf, u16 vlan, u8 qos)
5234{
5235 int err = 0;
5236 struct igb_adapter *adapter = netdev_priv(netdev);
5237
5238 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5239 return -EINVAL;
5240 if (vlan || qos) {
5241 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5242 if (err)
5243 goto out;
5244 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5245 igb_set_vmolr(adapter, vf, !vlan);
5246 adapter->vf_data[vf].pf_vlan = vlan;
5247 adapter->vf_data[vf].pf_qos = qos;
5248 dev_info(&adapter->pdev->dev,
5249 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5250 if (test_bit(__IGB_DOWN, &adapter->state)) {
5251 dev_warn(&adapter->pdev->dev,
5252 "The VF VLAN has been set,"
5253 " but the PF device is not up.\n");
5254 dev_warn(&adapter->pdev->dev,
5255 "Bring the PF device up before"
5256 " attempting to use the VF device.\n");
5257 }
5258 } else {
5259 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5260 false, vf);
5261 igb_set_vmvir(adapter, vlan, vf);
5262 igb_set_vmolr(adapter, vf, true);
5263 adapter->vf_data[vf].pf_vlan = 0;
5264 adapter->vf_data[vf].pf_qos = 0;
5265 }
5266out:
5267 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005268}
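
/* The ndo above is reached through rtnetlink. A minimal usage sketch
 * (illustrative only; assumes an igb PF named "eth0" with SR-IOV VFs
 * enabled):
 *
 *	ip link set eth0 vf 0 vlan 100 qos 3	(assign port VLAN 100, QoS 3)
 *	ip link set eth0 vf 0 vlan 0		(remove the port VLAN)
 */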
5269
5270static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5271{
5272 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5273 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5274
5275 return igb_vlvf_set(adapter, vid, add, vf);
5276}
5277
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005278static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005279{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005280 /* clear flags - except flag that indicates PF has set the MAC */
5281 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005282 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005283
5284 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005285 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005286
5287 /* reset vlans for device */
5288 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005289 if (adapter->vf_data[vf].pf_vlan)
5290 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5291 adapter->vf_data[vf].pf_vlan,
5292 adapter->vf_data[vf].pf_qos);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005295
5296 /* reset multicast table array for vf */
5297 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5298
5299 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005300 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005301}
5302
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005303static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5304{
5305 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5306
5307 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005308 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5309 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005310
5311 /* process remaining reset events */
5312 igb_vf_reset(adapter, vf);
5313}
5314
5315static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005316{
5317 struct e1000_hw *hw = &adapter->hw;
5318 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005319 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005320 u32 reg, msgbuf[3];
5321 u8 *addr = (u8 *)(&msgbuf[1]);
5322
5323 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005324 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005325
5326 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005327 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005328
5329 /* enable transmit and receive for vf */
5330 reg = rd32(E1000_VFTE);
5331 wr32(E1000_VFTE, reg | (1 << vf));
5332 reg = rd32(E1000_VFRE);
5333 wr32(E1000_VFRE, reg | (1 << vf));
5334
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005335 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005336
5337 /* reply to reset with ack and vf mac address */
5338 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5339 memcpy(addr, vf_mac, 6);
5340 igb_write_mbx(hw, msgbuf, 3, vf);
5341}
5342
5343static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5344{
Greg Rosede42edd2010-07-01 13:39:23 +00005345 /*
5346 * The VF MAC Address is stored in a packed array of bytes
5347 * starting at the second 32 bit word of the msg array
5348 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005349	unsigned char *addr = (unsigned char *)&msg[1];
5350 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005351
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005352 if (is_valid_ether_addr(addr))
5353 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005354
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005355 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005356}
5357
5358static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5359{
5360 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005361 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005362 u32 msg = E1000_VT_MSGTYPE_NACK;
5363
5364 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005365 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5366 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005367 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005368 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005369 }
5370}
5371
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005372static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005373{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005374 struct pci_dev *pdev = adapter->pdev;
5375 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005376 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005377 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005378 s32 retval;
5379
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005380 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005381
Alexander Duyckfef45f42009-12-11 22:57:34 -08005382 if (retval) {
5383 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005384 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005385 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5386 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5387 return;
5388 goto out;
5389 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005390
5391 /* this is a message we already processed, do nothing */
5392 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005393 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005394
5395 /*
5396 * until the vf completes a reset it should not be
5397 * allowed to start any configuration.
5398 */
5399
5400 if (msgbuf[0] == E1000_VF_RESET) {
5401 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005402 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005403 }
5404
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005405 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005406 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5407 return;
5408 retval = -1;
5409 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005410 }
5411
5412 switch ((msgbuf[0] & 0xFFFF)) {
5413 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005414 retval = -EINVAL;
5415 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5416 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5417 else
5418 dev_warn(&pdev->dev,
5419 "VF %d attempted to override administratively "
5420 "set MAC address\nReload the VF driver to "
5421 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005422 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005423 case E1000_VF_SET_PROMISC:
5424 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5425 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005426 case E1000_VF_SET_MULTICAST:
5427 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5428 break;
5429 case E1000_VF_SET_LPE:
5430 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5431 break;
5432 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005433 retval = -1;
5434 if (vf_data->pf_vlan)
5435 dev_warn(&pdev->dev,
5436 "VF %d attempted to override administratively "
5437 "set VLAN tag\nReload the VF driver to "
5438 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005439 else
5440 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005441 break;
5442 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005443 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005444 retval = -1;
5445 break;
5446 }
5447
Alexander Duyckfef45f42009-12-11 22:57:34 -08005448 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5449out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005450 /* notify the VF of the results of what it sent us */
5451 if (retval)
5452 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5453 else
5454 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5455
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005456 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005457}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005458
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005459static void igb_msg_task(struct igb_adapter *adapter)
5460{
5461 struct e1000_hw *hw = &adapter->hw;
5462 u32 vf;
5463
5464 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5465 /* process any reset requests */
5466 if (!igb_check_for_rst(hw, vf))
5467 igb_vf_reset_event(adapter, vf);
5468
5469 /* process any messages pending */
5470 if (!igb_check_for_msg(hw, vf))
5471 igb_rcv_msg_from_vf(adapter, vf);
5472
5473 /* process any acks */
5474 if (!igb_check_for_ack(hw, vf))
5475 igb_rcv_ack_from_vf(adapter, vf);
5476 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005477}
5478
Auke Kok9d5c8242008-01-24 02:22:38 -08005479/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005480 * igb_set_uta - Set unicast filter table address
5481 * @adapter: board private structure
5482 *
5483 * The unicast table address is a register array of 32-bit registers.
5484 * The table is meant to be used in a way similar to how the MTA is used;
5485 * however, due to certain limitations in the hardware it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005486 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5487 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
Alexander Duyck68d480c2009-10-05 06:33:08 +00005488 **/
5489static void igb_set_uta(struct igb_adapter *adapter)
5490{
5491 struct e1000_hw *hw = &adapter->hw;
5492 int i;
5493
5494 /* The UTA table only exists on 82576 hardware and newer */
5495 if (hw->mac.type < e1000_82576)
5496 return;
5497
5498 /* we only need to do this if VMDq is enabled */
5499 if (!adapter->vfs_allocated_count)
5500 return;
5501
5502 for (i = 0; i < hw->mac.uta_reg_count; i++)
5503 array_wr32(E1000_UTA, i, ~0);
5504}
5505
5506/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005507 * igb_intr_msi - Interrupt Handler
5508 * @irq: interrupt number
5509 * @data: pointer to a network interface device structure
5510 **/
5511static irqreturn_t igb_intr_msi(int irq, void *data)
5512{
Alexander Duyck047e0032009-10-27 15:49:27 +00005513 struct igb_adapter *adapter = data;
5514 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005515 struct e1000_hw *hw = &adapter->hw;
5516 /* read ICR disables interrupts using IAM */
5517 u32 icr = rd32(E1000_ICR);
5518
Alexander Duyck047e0032009-10-27 15:49:27 +00005519 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005520
Alexander Duyck7f081d42010-01-07 17:41:00 +00005521 if (icr & E1000_ICR_DRSTA)
5522 schedule_work(&adapter->reset_task);
5523
Alexander Duyck047e0032009-10-27 15:49:27 +00005524 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005525 /* HW is reporting DMA is out of sync */
5526 adapter->stats.doosync++;
5527 }
5528
Auke Kok9d5c8242008-01-24 02:22:38 -08005529 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5530 hw->mac.get_link_status = 1;
5531 if (!test_bit(__IGB_DOWN, &adapter->state))
5532 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5533 }
5534
Alexander Duyck047e0032009-10-27 15:49:27 +00005535 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005536
5537 return IRQ_HANDLED;
5538}
5539
5540/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005541 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005542 * @irq: interrupt number
5543 * @data: pointer to a network interface device structure
5544 **/
5545static irqreturn_t igb_intr(int irq, void *data)
5546{
Alexander Duyck047e0032009-10-27 15:49:27 +00005547 struct igb_adapter *adapter = data;
5548 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005549 struct e1000_hw *hw = &adapter->hw;
5550 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5551 * need for the IMC write */
5552 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005553
5554 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5555 * not set, then the adapter didn't send an interrupt */
5556 if (!(icr & E1000_ICR_INT_ASSERTED))
5557 return IRQ_NONE;
5558
Alexander Duyck0ba82992011-08-26 07:45:47 +00005559 igb_write_itr(q_vector);
5560
Alexander Duyck7f081d42010-01-07 17:41:00 +00005561 if (icr & E1000_ICR_DRSTA)
5562 schedule_work(&adapter->reset_task);
5563
Alexander Duyck047e0032009-10-27 15:49:27 +00005564 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005565 /* HW is reporting DMA is out of sync */
5566 adapter->stats.doosync++;
5567 }
5568
Auke Kok9d5c8242008-01-24 02:22:38 -08005569 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5570 hw->mac.get_link_status = 1;
5571 /* guard against interrupt when we're going down */
5572 if (!test_bit(__IGB_DOWN, &adapter->state))
5573 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5574 }
5575
Alexander Duyck047e0032009-10-27 15:49:27 +00005576 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005577
5578 return IRQ_HANDLED;
5579}
5580
Alexander Duyck0ba82992011-08-26 07:45:47 +00005581void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005582{
Alexander Duyck047e0032009-10-27 15:49:27 +00005583 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005584 struct e1000_hw *hw = &adapter->hw;
5585
Alexander Duyck0ba82992011-08-26 07:45:47 +00005586 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5587 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5588 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5589 igb_set_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005590 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005591 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005592 }
5593
5594 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5595 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005596 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005597 else
5598 igb_irq_enable(adapter);
5599 }
5600}
5601
Auke Kok9d5c8242008-01-24 02:22:38 -08005602/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005603 * igb_poll - NAPI Rx polling callback
5604 * @napi: napi polling structure
5605 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005606 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005607static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005608{
Alexander Duyck047e0032009-10-27 15:49:27 +00005609 struct igb_q_vector *q_vector = container_of(napi,
5610 struct igb_q_vector,
5611 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005612 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005613
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005614#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005615 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5616 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005617#endif
Alexander Duyck0ba82992011-08-26 07:45:47 +00005618 if (q_vector->tx.ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005619 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005620
Alexander Duyck0ba82992011-08-26 07:45:47 +00005621 if (q_vector->rx.ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005622 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005623
Alexander Duyck16eb8812011-08-26 07:43:54 +00005624 /* If all work not completed, return budget and keep polling */
5625 if (!clean_complete)
5626 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005627
Alexander Duyck46544252009-02-19 20:39:04 -08005628	/* all work completed within budget, exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005629 napi_complete(napi);
5630 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005631
Alexander Duyck16eb8812011-08-26 07:43:54 +00005632 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005633}
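
/* NAPI contract for the poll routine above: returning the full budget
 * keeps this q_vector scheduled for another pass, while calling
 * napi_complete() and returning less than budget (0 here) hands
 * interrupt handling back to the hardware via igb_ring_irq_enable().
 */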
Al Viro6d8126f2008-03-16 22:23:24 +00005634
Auke Kok9d5c8242008-01-24 02:22:38 -08005635/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005636 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005637 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005638 * @shhwtstamps: timestamp structure to update
5639 * @regval: unsigned 64bit system time value.
5640 *
5641 * We need to convert the system time value stored in the RX/TXSTMP registers
5642 * into a hwtstamp which can be used by the upper level timestamping functions
5643 */
5644static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5645 struct skb_shared_hwtstamps *shhwtstamps,
5646 u64 regval)
5647{
5648 u64 ns;
5649
Alexander Duyck55cac242009-11-19 12:42:21 +00005650 /*
5651 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5652 * 24 to match clock shift we setup earlier.
5653 */
Alexander Duyck06218a82011-08-26 07:46:55 +00005654 if (adapter->hw.mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00005655 regval <<= IGB_82580_TSYNC_SHIFT;
5656
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005657 ns = timecounter_cyc2time(&adapter->clock, regval);
5658 timecompare_update(&adapter->compare, ns);
5659 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5660 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5661 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5662}
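
/* Worked example of the 82580 adjustment above (illustrative only):
 * the 82580 reports the timestamp with 1 ns resolution at bit 0, but
 * the cyclecounter was registered with a shift of 24, so a raw value
 * of 0x1 must become (0x1 << IGB_82580_TSYNC_SHIFT) = 0x1000000 before
 * timecounter_cyc2time() can interpret it.
 */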
5663
5664/**
5665 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5666 * @q_vector: pointer to q_vector containing needed info
Alexander Duyck06034642011-08-26 07:44:22 +00005667 * @buffer_info: pointer to igb_tx_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005668 *
5669 * If we were asked to do hardware stamping and such a time stamp is
5670 * available, then it must have been for this skb here because we only
5671 * allow one such packet into the queue.
5672 */
Alexander Duyck06034642011-08-26 07:44:22 +00005673static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5674 struct igb_tx_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005675{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005676 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005677 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005678 struct skb_shared_hwtstamps shhwtstamps;
5679 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005680
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005681 /* if skb does not support hw timestamp or TX stamp not valid exit */
Alexander Duyck2bbfebe2011-08-26 07:44:59 +00005682 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005683 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5684 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005685
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005686 regval = rd32(E1000_TXSTMPL);
5687 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5688
5689 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005690 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005691}
5692
5693/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005694 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005695 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005696 * returns true if ring is completely cleaned
5697 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005698static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005699{
Alexander Duyck047e0032009-10-27 15:49:27 +00005700 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005701 struct igb_ring *tx_ring = q_vector->tx.ring;
Alexander Duyck06034642011-08-26 07:44:22 +00005702 struct igb_tx_buffer *tx_buffer;
Alexander Duyck8542db02011-08-26 07:44:43 +00005703 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005704 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0ba82992011-08-26 07:45:47 +00005705 unsigned int budget = q_vector->tx.work_limit;
Alexander Duyck8542db02011-08-26 07:44:43 +00005706 unsigned int i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005707
Alexander Duyck13fde972011-10-05 13:35:24 +00005708 if (test_bit(__IGB_DOWN, &adapter->state))
5709 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005710
Alexander Duyck06034642011-08-26 07:44:22 +00005711 tx_buffer = &tx_ring->tx_buffer_info[i];
Alexander Duyck13fde972011-10-05 13:35:24 +00005712 tx_desc = IGB_TX_DESC(tx_ring, i);
Alexander Duyck8542db02011-08-26 07:44:43 +00005713 i -= tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005714
Alexander Duyck13fde972011-10-05 13:35:24 +00005715 for (; budget; budget--) {
Alexander Duyck8542db02011-08-26 07:44:43 +00005716 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005717
Alexander Duyck8542db02011-08-26 07:44:43 +00005718 /* prevent any other reads prior to eop_desc */
5719 rmb();
5720
5721 /* if next_to_watch is not set then there is no work pending */
5722 if (!eop_desc)
5723 break;
Alexander Duyck13fde972011-10-05 13:35:24 +00005724
5725 /* if DD is not set pending work has not been completed */
5726 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5727 break;
5728
Alexander Duyck8542db02011-08-26 07:44:43 +00005729 /* clear next_to_watch to prevent false hangs */
5730 tx_buffer->next_to_watch = NULL;
Alexander Duyck13fde972011-10-05 13:35:24 +00005731
Alexander Duyckebe42d12011-08-26 07:45:09 +00005732 /* update the statistics for this packet */
5733 total_bytes += tx_buffer->bytecount;
5734 total_packets += tx_buffer->gso_segs;
Alexander Duyck13fde972011-10-05 13:35:24 +00005735
Alexander Duyckebe42d12011-08-26 07:45:09 +00005736 /* retrieve hardware timestamp */
5737 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005738
Alexander Duyckebe42d12011-08-26 07:45:09 +00005739 /* free the skb */
5740 dev_kfree_skb_any(tx_buffer->skb);
5741 tx_buffer->skb = NULL;
5742
5743 /* unmap skb header data */
5744 dma_unmap_single(tx_ring->dev,
5745 tx_buffer->dma,
5746 tx_buffer->length,
5747 DMA_TO_DEVICE);
5748
5749 /* clear last DMA location and unmap remaining buffers */
5750 while (tx_desc != eop_desc) {
5751 tx_buffer->dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005752
Alexander Duyck13fde972011-10-05 13:35:24 +00005753 tx_buffer++;
5754 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005755 i++;
Alexander Duyck8542db02011-08-26 07:44:43 +00005756 if (unlikely(!i)) {
5757 i -= tx_ring->count;
Alexander Duyck06034642011-08-26 07:44:22 +00005758 tx_buffer = tx_ring->tx_buffer_info;
Alexander Duyck13fde972011-10-05 13:35:24 +00005759 tx_desc = IGB_TX_DESC(tx_ring, 0);
5760 }
Alexander Duyckebe42d12011-08-26 07:45:09 +00005761
5762 /* unmap any remaining paged data */
5763 if (tx_buffer->dma) {
5764 dma_unmap_page(tx_ring->dev,
5765 tx_buffer->dma,
5766 tx_buffer->length,
5767 DMA_TO_DEVICE);
5768 }
5769 }
5770
5771 /* clear last DMA location */
5772 tx_buffer->dma = 0;
5773
5774 /* move us one more past the eop_desc for start of next pkt */
5775 tx_buffer++;
5776 tx_desc++;
5777 i++;
5778 if (unlikely(!i)) {
5779 i -= tx_ring->count;
5780 tx_buffer = tx_ring->tx_buffer_info;
5781 tx_desc = IGB_TX_DESC(tx_ring, 0);
5782 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005783 }
5784
Alexander Duyck8542db02011-08-26 07:44:43 +00005785 i += tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005786 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005787 u64_stats_update_begin(&tx_ring->tx_syncp);
5788 tx_ring->tx_stats.bytes += total_bytes;
5789 tx_ring->tx_stats.packets += total_packets;
5790 u64_stats_update_end(&tx_ring->tx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00005791 q_vector->tx.total_bytes += total_bytes;
5792 q_vector->tx.total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005793
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005794 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005795 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck13fde972011-10-05 13:35:24 +00005796
Alexander Duyck8542db02011-08-26 07:44:43 +00005797 eop_desc = tx_buffer->next_to_watch;
Alexander Duyck13fde972011-10-05 13:35:24 +00005798
Auke Kok9d5c8242008-01-24 02:22:38 -08005799		/* Detect a transmit hang in hardware; this serializes the
5800 * check with the clearing of time_stamp and movement of i */
Alexander Duyck6d095fa2011-08-26 07:46:19 +00005801 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
Alexander Duyck8542db02011-08-26 07:44:43 +00005802 if (eop_desc &&
5803 time_after(jiffies, tx_buffer->time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005804 (adapter->tx_timeout_factor * HZ)) &&
5805 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005806
Auke Kok9d5c8242008-01-24 02:22:38 -08005807 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005808 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005809 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005810 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005811 " TDH <%x>\n"
5812 " TDT <%x>\n"
5813 " next_to_use <%x>\n"
5814 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005815 "buffer_info[next_to_clean]\n"
5816 " time_stamp <%lx>\n"
Alexander Duyck8542db02011-08-26 07:44:43 +00005817 " next_to_watch <%p>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005818 " jiffies <%lx>\n"
5819 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005820 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005821 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005822 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005823 tx_ring->next_to_use,
5824 tx_ring->next_to_clean,
Alexander Duyck8542db02011-08-26 07:44:43 +00005825 tx_buffer->time_stamp,
5826 eop_desc,
Auke Kok9d5c8242008-01-24 02:22:38 -08005827 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005828 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005829 netif_stop_subqueue(tx_ring->netdev,
5830 tx_ring->queue_index);
5831
5832 /* we are about to reset, no point in enabling stuff */
5833 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005834 }
5835 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005836
5837 if (unlikely(total_packets &&
5838 netif_carrier_ok(tx_ring->netdev) &&
5839 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5840 /* Make sure that anybody stopping the queue after this
5841 * sees the new next_to_clean.
5842 */
5843 smp_mb();
5844 if (__netif_subqueue_stopped(tx_ring->netdev,
5845 tx_ring->queue_index) &&
5846 !(test_bit(__IGB_DOWN, &adapter->state))) {
5847 netif_wake_subqueue(tx_ring->netdev,
5848 tx_ring->queue_index);
5849
5850 u64_stats_update_begin(&tx_ring->tx_syncp);
5851 tx_ring->tx_stats.restart_queue++;
5852 u64_stats_update_end(&tx_ring->tx_syncp);
5853 }
5854 }
5855
5856 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005857}
5858
Alexander Duyckcd392f52011-08-26 07:43:59 +00005859static inline void igb_rx_checksum(struct igb_ring *ring,
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005860 union e1000_adv_rx_desc *rx_desc,
5861 struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005862{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005863 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005864
Alexander Duyck294e7d72011-08-26 07:45:57 +00005865 /* Ignore Checksum bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005866 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
Alexander Duyck294e7d72011-08-26 07:45:57 +00005867 return;
5868
5869 /* Rx checksum disabled via ethtool */
5870 if (!(ring->netdev->features & NETIF_F_RXCSUM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005871 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005872
Auke Kok9d5c8242008-01-24 02:22:38 -08005873 /* TCP/UDP checksum error bit is set */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005874 if (igb_test_staterr(rx_desc,
5875 E1000_RXDEXT_STATERR_TCPE |
5876 E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005877 /*
5878 * work around errata with sctp packets where the TCPE aka
5879 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5880 * packets, (aka let the stack check the crc32c)
5881 */
Alexander Duyck866cff02011-08-26 07:45:36 +00005882 if (!((skb->len == 60) &&
5883 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
Eric Dumazet12dcd862010-10-15 17:27:10 +00005884 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005885 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005886 u64_stats_update_end(&ring->rx_syncp);
5887 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005888 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005889 return;
5890 }
5891 /* It must be a TCP or UDP packet with a valid checksum */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005892 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5893 E1000_RXD_STAT_UDPCS))
Auke Kok9d5c8242008-01-24 02:22:38 -08005894 skb->ip_summed = CHECKSUM_UNNECESSARY;
5895
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005896 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5897 le32_to_cpu(rx_desc->wb.upper.status_error));
Auke Kok9d5c8242008-01-24 02:22:38 -08005898}
5899
Alexander Duyck077887c2011-08-26 07:46:29 +00005900static inline void igb_rx_hash(struct igb_ring *ring,
5901 union e1000_adv_rx_desc *rx_desc,
5902 struct sk_buff *skb)
5903{
5904 if (ring->netdev->features & NETIF_F_RXHASH)
5905 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5906}
5907
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005908static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5909 union e1000_adv_rx_desc *rx_desc,
5910 struct sk_buff *skb)
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005911{
5912 struct igb_adapter *adapter = q_vector->adapter;
5913 struct e1000_hw *hw = &adapter->hw;
5914 u64 regval;
5915
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005916 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
5917 E1000_RXDADV_STAT_TS))
5918 return;
5919
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005920 /*
5921 * If this bit is set, then the RX registers contain the time stamp. No
5922 * other packet will be time stamped until we read these registers, so
5923 * read the registers to make them available again. Because only one
5924 * packet can be time stamped at a time, we know that the register
5925 * values must belong to this one here and therefore we don't need to
5926 * compare any of the additional attributes stored for it.
5927 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005928 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005929 * can turn into a skb_shared_hwtstamps.
5930 */
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005931 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
Nick Nunley757b77e2010-03-26 11:36:47 +00005932 u32 *stamp = (u32 *)skb->data;
5933 regval = le32_to_cpu(*(stamp + 2));
5934 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5935 skb_pull(skb, IGB_TS_HDR_LEN);
5936 } else {
5937		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5938 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005939
Nick Nunley757b77e2010-03-26 11:36:47 +00005940 regval = rd32(E1000_RXSTMPL);
5941 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5942 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005943
5944 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5945}
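
/* Note on the two branches above: with TSIP the timestamp was DMA'd
 * into the packet buffer itself (the IGB_TS_HDR_LEN bytes stripped by
 * skb_pull()); otherwise it must be read back from RXSTMPL/RXSTMPH,
 * which also unlocks the registers so another packet can be stamped.
 */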
Alexander Duyck8be10e92011-08-26 07:47:11 +00005946
5947static void igb_rx_vlan(struct igb_ring *ring,
5948 union e1000_adv_rx_desc *rx_desc,
5949 struct sk_buff *skb)
5950{
5951 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5952 u16 vid;
5953 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5954 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5955 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5956 else
5957 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5958
5959 __vlan_hwaccel_put_tag(skb, vid);
5960 }
5961}
5962
Alexander Duyck44390ca2011-08-26 07:43:38 +00005963static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005964{
5965 /* HW will not DMA in data larger than the given buffer, even if it
5966 * parses the (NFS, of course) header to be larger. In that case, it
5967 * fills the header buffer and spills the rest into the page.
5968 */
5969 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5970 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00005971 if (hlen > IGB_RX_HDR_LEN)
5972 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005973 return hlen;
5974}
5975
Alexander Duyckcd392f52011-08-26 07:43:59 +00005976static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005977{
Alexander Duyck0ba82992011-08-26 07:45:47 +00005978 struct igb_ring *rx_ring = q_vector->rx.ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005979 union e1000_adv_rx_desc *rx_desc;
5980 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005981 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005982 u16 cleaned_count = igb_desc_unused(rx_ring);
5983 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005984
Alexander Duyck601369062011-08-26 07:44:05 +00005985 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005986
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00005987 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
Alexander Duyck06034642011-08-26 07:44:22 +00005988 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Alexander Duyck16eb8812011-08-26 07:43:54 +00005989 struct sk_buff *skb = buffer_info->skb;
5990 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005991
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005992 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005993 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005994
5995 i++;
5996 if (i == rx_ring->count)
5997 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005998
Alexander Duyck601369062011-08-26 07:44:05 +00005999 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006000 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006001
Alexander Duyck16eb8812011-08-26 07:43:54 +00006002 /*
6003 * This memory barrier is needed to keep us from reading
6004 * any other fields out of the rx_desc until we know the
6005 * RXD_STAT_DD bit is set
6006 */
6007 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00006008
Alexander Duyck16eb8812011-08-26 07:43:54 +00006009 if (!skb_is_nonlinear(skb)) {
6010 __skb_put(skb, igb_get_hlen(rx_desc));
6011 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00006012 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00006013 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00006014 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006015 }
6016
Alexander Duyck16eb8812011-08-26 07:43:54 +00006017 if (rx_desc->wb.upper.length) {
6018 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006019
Koki Sanagiaa913402010-04-27 01:01:19 +00006020 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006021 buffer_info->page,
6022 buffer_info->page_offset,
6023 length);
6024
Alexander Duyck16eb8812011-08-26 07:43:54 +00006025 skb->len += length;
6026 skb->data_len += length;
Eric Dumazet95b9c1d2011-10-13 07:56:41 +00006027 skb->truesize += PAGE_SIZE / 2;
Alexander Duyck16eb8812011-08-26 07:43:54 +00006028
Alexander Duyckd1eff352009-11-12 18:38:35 +00006029 if ((page_count(buffer_info->page) != 1) ||
6030 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006031 buffer_info->page = NULL;
6032 else
6033 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08006034
Alexander Duyck16eb8812011-08-26 07:43:54 +00006035 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
6036 PAGE_SIZE / 2, DMA_FROM_DEVICE);
6037 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006038 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006039
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006040 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
Alexander Duyck06034642011-08-26 07:44:22 +00006041 struct igb_rx_buffer *next_buffer;
6042 next_buffer = &rx_ring->rx_buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08006043 buffer_info->skb = next_buffer->skb;
6044 buffer_info->dma = next_buffer->dma;
6045 next_buffer->skb = skb;
6046 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006047 goto next_desc;
6048 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00006049
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006050 if (igb_test_staterr(rx_desc,
6051 E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00006052 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006053 goto next_desc;
6054 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006055
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006056 igb_rx_hwtstamp(q_vector, rx_desc, skb);
Alexander Duyck077887c2011-08-26 07:46:29 +00006057 igb_rx_hash(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006058 igb_rx_checksum(rx_ring, rx_desc, skb);
Alexander Duyck8be10e92011-08-26 07:47:11 +00006059 igb_rx_vlan(rx_ring, rx_desc, skb);
Alexander Duyck3ceb90f2011-08-26 07:46:03 +00006060
6061 total_bytes += skb->len;
6062 total_packets++;
6063
6064 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6065
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006066 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08006067
Alexander Duyck16eb8812011-08-26 07:43:54 +00006068 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08006069next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00006070 if (!budget)
6071 break;
6072
6073 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006074 /* return some buffers to hardware, one at a time is too slow */
6075 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00006076 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08006077 cleaned_count = 0;
6078 }
6079
6080 /* use prefetched values */
6081 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08006082 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07006083
Auke Kok9d5c8242008-01-24 02:22:38 -08006084 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006085 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08006086 rx_ring->rx_stats.packets += total_packets;
6087 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00006088 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck0ba82992011-08-26 07:45:47 +00006089 q_vector->rx.total_packets += total_packets;
6090 q_vector->rx.total_bytes += total_bytes;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006091
6092 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00006093 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006094
Alexander Duyck16eb8812011-08-26 07:43:54 +00006095 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08006096}
6097
Alexander Duyckc023cd82011-08-26 07:43:43 +00006098static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006099 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006100{
6101 struct sk_buff *skb = bi->skb;
6102 dma_addr_t dma = bi->dma;
6103
6104 if (dma)
6105 return true;
6106
6107 if (likely(!skb)) {
6108 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6109 IGB_RX_HDR_LEN);
6110 bi->skb = skb;
6111 if (!skb) {
6112 rx_ring->rx_stats.alloc_failed++;
6113 return false;
6114 }
6115
6116 /* initialize skb for ring */
6117 skb_record_rx_queue(skb, rx_ring->queue_index);
6118 }
6119
6120 dma = dma_map_single(rx_ring->dev, skb->data,
6121 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6122
6123 if (dma_mapping_error(rx_ring->dev, dma)) {
6124 rx_ring->rx_stats.alloc_failed++;
6125 return false;
6126 }
6127
6128 bi->dma = dma;
6129 return true;
6130}
6131
6132static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00006133 struct igb_rx_buffer *bi)
Alexander Duyckc023cd82011-08-26 07:43:43 +00006134{
6135 struct page *page = bi->page;
6136 dma_addr_t page_dma = bi->page_dma;
6137 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6138
6139 if (page_dma)
6140 return true;
6141
6142 if (!page) {
6143 page = netdev_alloc_page(rx_ring->netdev);
6144 bi->page = page;
6145 if (unlikely(!page)) {
6146 rx_ring->rx_stats.alloc_failed++;
6147 return false;
6148 }
6149 }
6150
6151 page_dma = dma_map_page(rx_ring->dev, page,
6152 page_offset, PAGE_SIZE / 2,
6153 DMA_FROM_DEVICE);
6154
6155 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6156 rx_ring->rx_stats.alloc_failed++;
6157 return false;
6158 }
6159
6160 bi->page_dma = page_dma;
6161 bi->page_offset = page_offset;
6162 return true;
6163}
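
/* Note: the page_offset XOR above flips between the two halves of the
 * page, so a page recycled by igb_clean_rx_irq() (which holds an extra
 * reference via get_page()) serves alternating half-page rx buffers.
 */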
6164
Auke Kok9d5c8242008-01-24 02:22:38 -08006165/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00006166 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08006167 * @rx_ring: pointer to the rx descriptor ring to refill
 * @cleaned_count: number of receive buffers to replace
6168 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00006169void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08006170{
Auke Kok9d5c8242008-01-24 02:22:38 -08006171 union e1000_adv_rx_desc *rx_desc;
Alexander Duyck06034642011-08-26 07:44:22 +00006172 struct igb_rx_buffer *bi;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006173 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08006174
Alexander Duyck601369062011-08-26 07:44:05 +00006175 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyck06034642011-08-26 07:44:22 +00006176 bi = &rx_ring->rx_buffer_info[i];
Alexander Duyckc023cd82011-08-26 07:43:43 +00006177 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006178
6179 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006180 if (!igb_alloc_mapped_skb(rx_ring, bi))
6181 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006182
Alexander Duyckc023cd82011-08-26 07:43:43 +00006183 /* Refresh the desc even if buffer_addrs didn't change
6184 * because each write-back erases this info. */
6185 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006186
Alexander Duyckc023cd82011-08-26 07:43:43 +00006187 if (!igb_alloc_mapped_page(rx_ring, bi))
6188 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006189
Alexander Duyckc023cd82011-08-26 07:43:43 +00006190 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006191
Alexander Duyckc023cd82011-08-26 07:43:43 +00006192 rx_desc++;
6193 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006194 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006195 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006196 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyck06034642011-08-26 07:44:22 +00006197 bi = rx_ring->rx_buffer_info;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006198 i -= rx_ring->count;
6199 }
6200
6201 /* clear the hdr_addr for the next_to_use descriptor */
6202 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006203 }
6204
Alexander Duyckc023cd82011-08-26 07:43:43 +00006205 i += rx_ring->count;
6206
Auke Kok9d5c8242008-01-24 02:22:38 -08006207 if (rx_ring->next_to_use != i) {
6208 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006209
6210 /* Force memory writes to complete before letting h/w
6211 * know there are new descriptors to fetch. (Only
6212 * applicable for weak-ordered memory model archs,
6213 * such as IA-64). */
6214 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006215 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006216 }
6217}
6218
6219/**
6220 * igb_mii_ioctl -
6221 * @netdev:
6222 * @ifreq:
6223 * @cmd:
6224 **/
6225static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6226{
6227 struct igb_adapter *adapter = netdev_priv(netdev);
6228 struct mii_ioctl_data *data = if_mii(ifr);
6229
6230 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6231 return -EOPNOTSUPP;
6232
6233 switch (cmd) {
6234 case SIOCGMIIPHY:
6235 data->phy_id = adapter->hw.phy.addr;
6236 break;
6237 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006238 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6239 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006240 return -EIO;
6241 break;
6242 case SIOCSMIIREG:
6243 default:
6244 return -EOPNOTSUPP;
6245 }
6246 return 0;
6247}
6248
6249/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006250 * igb_hwtstamp_ioctl - control hardware time stamping
6251 * @netdev:
6252 * @ifreq:
6253 * @cmd:
6254 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006255 * Outgoing time stamping can be enabled and disabled. Play nice and
6256 * disable it when requested, although it shouldn't cause any overhead
6257 * when no packet needs it. At most one packet in the queue may be
6258 * marked for time stamping, otherwise it would be impossible to tell
6259 * for sure to which packet the hardware time stamp belongs.
6260 *
6261 * Incoming time stamping has to be configured via the hardware
6262 * filters. Not all combinations are supported, in particular event
6263 * type has to be specified. Matching the kind of event packet is
6264 * not supported, with the exception of "all V2 events regardless of
6265 * level 2 or 4".
6266 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006267 **/
6268static int igb_hwtstamp_ioctl(struct net_device *netdev,
6269 struct ifreq *ifr, int cmd)
6270{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006271 struct igb_adapter *adapter = netdev_priv(netdev);
6272 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006273 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006274 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6275 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006276 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006277 bool is_l4 = false;
6278 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006279 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006280
6281 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6282 return -EFAULT;
6283
6284 /* reserved for future extensions */
6285 if (config.flags)
6286 return -EINVAL;
6287
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006288 switch (config.tx_type) {
6289 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006290 tsync_tx_ctl = 0;
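		/* fall through */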
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006291 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006292 break;
6293 default:
6294 return -ERANGE;
6295 }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tsync_rx_ctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_ALL:
                /*
                 * register TSYNCRXCFG must be set, therefore it is not
                 * possible to time stamp both Sync and Delay_Req messages
                 * => fall back to time stamping all packets
                 */
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
                tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
                is_l2 = true;
                is_l4 = true;
                config.rx_filter = HWTSTAMP_FILTER_SOME;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                is_l2 = true;
                is_l4 = true;
                break;
        default:
                return -ERANGE;
        }

        if (hw->mac.type == e1000_82575) {
                if (tsync_rx_ctl || tsync_tx_ctl)
                        return -EINVAL;
                return 0;
        }

        /*
         * Per-packet timestamping only works if all packets are
         * timestamped, so enable timestamping in all packets as
         * long as one rx filter was configured.
         */
        if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
                tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
        }

        /* enable/disable TX */
        regval = rd32(E1000_TSYNCTXCTL);
        regval &= ~E1000_TSYNCTXCTL_ENABLED;
        regval |= tsync_tx_ctl;
        wr32(E1000_TSYNCTXCTL, regval);

        /* enable/disable RX */
        regval = rd32(E1000_TSYNCRXCTL);
        regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
        regval |= tsync_rx_ctl;
        wr32(E1000_TSYNCRXCTL, regval);

        /* define which PTP packets are time stamped */
        wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

        /* define ethertype filter for timestamped packets */
        if (is_l2)
                wr32(E1000_ETQF(3),
                     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
                      E1000_ETQF_1588 |          /* enable timestamping */
                      ETH_P_1588));              /* 1588 eth protocol type */
        else
                wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
        /* L4 Queue Filter[3]: filter by destination port and protocol */
        if (is_l4) {
                u32 ftqf = (IPPROTO_UDP /* UDP */
                        | E1000_FTQF_VF_BP /* VF not compared */
                        | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
                        | E1000_FTQF_MASK); /* mask all inputs */
                ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

                wr32(E1000_IMIR(3), htons(PTP_PORT));
                wr32(E1000_IMIREXT(3),
                     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
                if (hw->mac.type == e1000_82576) {
                        /* enable source port check */
                        wr32(E1000_SPQF(3), htons(PTP_PORT));
                        ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
                }
                wr32(E1000_FTQF(3), ftqf);
        } else {
                wr32(E1000_FTQF(3), E1000_FTQF_MASK);
        }
        wrfl();

        adapter->hwtstamp_config = config;

        /* clear TX/RX time stamp registers, just to be sure */
        regval = rd32(E1000_TXSTMPH);
        regval = rd32(E1000_RXSTMPH);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}
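
/*
 * Illustrative sketch (not part of the driver): userspace enables hardware
 * timestamping through the SIOCSHWTSTAMP ioctl handled above, roughly as
 * follows (the socket fd and the interface name "eth0" are assumptions):
 *
 *        struct hwtstamp_config config = {
 *                .tx_type   = HWTSTAMP_TX_ON,
 *                .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *        };
 *        struct ifreq ifr;
 *
 *        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *        ifr.ifr_data = (void *)&config;
 *        ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, config.rx_filter reports the filter actually applied, which
 * may be broader than requested (see the fallbacks above).
 */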

/**
 * igb_ioctl - handle an ioctl request on the network device
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the ioctl argument
 * @cmd: ioctl command number
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
        case SIOCSHWTSTAMP:
                return igb_hwtstamp_ioctl(netdev, ifr, cmd);
        default:
                return -EOPNOTSUPP;
        }
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = adapter->pdev->pcie_cap;
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_read_config_word(adapter->pdev, cap_offset + reg, value);

        return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
        struct igb_adapter *adapter = hw->back;
        u16 cap_offset;

        cap_offset = adapter->pdev->pcie_cap;
        if (!cap_offset)
                return -E1000_ERR_CONFIG;

        pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

        return 0;
}
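
/*
 * These helpers let the shared e1000 code access PCIe capability registers
 * without knowing about struct pci_dev. A hypothetical call, where
 * PCI_EXP_DEVCTL is the Device Control offset within the capability:
 *
 *        u16 devctl;
 *        igb_read_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl);
 */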

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl;
        bool enable = !!(features & NETIF_F_HW_VLAN_RX);

        if (enable) {
                /* enable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl |= E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);

                /* Disable CFI check */
                rctl = rd32(E1000_RCTL);
                rctl &= ~E1000_RCTL_CFIEN;
                wr32(E1000_RCTL, rctl);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = rd32(E1000_CTRL);
                ctrl &= ~E1000_CTRL_VME;
                wr32(E1000_CTRL, ctrl);
        }

        igb_rlpml_set(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;

        /* attempt to add filter to vlvf array */
        igb_vlvf_set(adapter, vid, true, pf_id);

        /* add the filter since PF can receive vlans w/o entry in vlvf */
        igb_vfta_set(hw, vid, true);

        set_bit(vid, adapter->active_vlans);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int pf_id = adapter->vfs_allocated_count;
        s32 err;

        /* remove vlan from VLVF table array */
        err = igb_vlvf_set(adapter, vid, false, pf_id);

        /* if vid was not present in VLVF just remove it from table */
        if (err)
                igb_vfta_set(hw, vid, false);

        clear_bit(vid, adapter->active_vlans);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
        u16 vid;

        igb_vlan_mode(adapter->netdev, adapter->netdev->features);

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                igb_vlan_rx_add_vid(adapter->netdev, vid);
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;

        mac->autoneg = 0;

        /* Make sure dplx is at most 1 bit and lsb of speed is not set
         * for the switch() below to work */
        if ((spd & 1) || (dplx & ~1))
                goto err_inval;

        /* Fiber NICs only allow 1000 Mbps full duplex */
        if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
            (spd != SPEED_1000 || dplx != DUPLEX_FULL))
                goto err_inval;

        switch (spd + dplx) {
        case SPEED_10 + DUPLEX_HALF:
                mac->forced_speed_duplex = ADVERTISE_10_HALF;
                break;
        case SPEED_10 + DUPLEX_FULL:
                mac->forced_speed_duplex = ADVERTISE_10_FULL;
                break;
        case SPEED_100 + DUPLEX_HALF:
                mac->forced_speed_duplex = ADVERTISE_100_HALF;
                break;
        case SPEED_100 + DUPLEX_FULL:
                mac->forced_speed_duplex = ADVERTISE_100_FULL;
                break;
        case SPEED_1000 + DUPLEX_FULL:
                mac->autoneg = 1;
                adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
                break;
        case SPEED_1000 + DUPLEX_HALF: /* not supported */
        default:
                goto err_inval;
        }
        return 0;

err_inval:
        dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
        return -EINVAL;
}
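
/*
 * Worked example (illustrative): "ethtool -s eth0 speed 100 duplex full"
 * arrives here as spd = SPEED_100 (100) and dplx = DUPLEX_FULL (1), so
 * spd + dplx selects the SPEED_100 + DUPLEX_FULL case and forces
 * ADVERTISE_100_FULL. SPEED_1000 + DUPLEX_FULL instead re-enables
 * autoneg, since gigabit copper links cannot be forced in practice.
 */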

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = adapter->wol;
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev))
                igb_close(netdev);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
#endif

        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;

        if (wufc) {
                igb_setup_rctl(adapter);
                igb_set_rx_mode(netdev);

                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
                        rctl = rd32(E1000_RCTL);
                        rctl |= E1000_RCTL_MPE;
                        wr32(E1000_RCTL, rctl);
                }

                ctrl = rd32(E1000_CTRL);
                /* advertise wake from D3Cold */
                #define E1000_CTRL_ADVD3WUC 0x00100000
                /* phy power management enable */
                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);

                /* Allow time for pending master requests to run */
                igb_disable_pcie_master(hw);

                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
        } else {
                wr32(E1000_WUC, 0);
                wr32(E1000_WUFC, 0);
        }

        *enable_wake = wufc || adapter->en_mng_pt;
        if (!*enable_wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);

        /* Release control of h/w to f/w. If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        pci_disable_device(pdev);

        return 0;
}
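
/*
 * Note on the wake-up path above: adapter->wol supplies the Wake Up Filter
 * Control (WUFC) bits. Link-change wake (E1000_WUFC_LNKC) is dropped while
 * the link is already up, and wake on multicast (E1000_WUFC_MC) also forces
 * all-multi mode (E1000_RCTL_MPE) so that multicast frames still reach the
 * wake-up filters while the interface is suspended.
 */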

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int retval;
        bool wake;

        retval = __igb_shutdown(pdev, &wake);
        if (retval)
                return retval;

        if (wake) {
                pci_prepare_to_sleep(pdev);
        } else {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }

        return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "igb: Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        wr32(E1000_WUS, ~0);

        if (netif_running(netdev)) {
                err = igb_open(netdev);
                if (err)
                        return err;
        }

        netif_device_attach(netdev);

        return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
        bool wake;

        __igb_shutdown(pdev, &wake);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, wake);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct igb_q_vector *q_vector;
        int i;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (adapter->msix_entries)
                        wr32(E1000_EIMC, q_vector->eims_value);
                else
                        igb_irq_disable(adapter);
                napi_schedule(&q_vector->napi);
        }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev))
                igb_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        pci_ers_result_t result;
        int err;

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);

                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);

                igb_reset(adapter);
                wr32(E1000_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
                        "failed 0x%x\n", err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (igb_up(adapter)) {
                        dev_err(&pdev->dev, "igb_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
        u32 rar_low, rar_high;
        struct e1000_hw *hw = &adapter->hw;

        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

        /* Indicate to hardware the Address is Valid. */
        rar_high |= E1000_RAH_AV;

        if (hw->mac.type == e1000_82575)
                rar_high |= E1000_RAH_POOL_1 * qsel;
        else
                rar_high |= E1000_RAH_POOL_1 << qsel;

        wr32(E1000_RAL(index), rar_low);
        wrfl();
        wr32(E1000_RAH(index), rar_high);
        wrfl();
}
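
/*
 * Worked example (illustrative): for addr = 00:1b:21:3a:4c:5d the code
 * above yields rar_low = 0x3a211b00 and rar_high = 0x5d4c before the
 * valid (AV) and pool-select bits are OR'ed in, which is the
 * little-endian layout the hardware expects.
 */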

static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
        struct e1000_hw *hw = &adapter->hw;
        /* VF MAC addresses start at the end of the receive addresses and
         * move towards the first; as a result a collision should not be
         * possible */
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);

        memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

        igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

        return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
                return -EINVAL;
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
                 " change effective.\n");
        if (test_bit(__IGB_DOWN, &adapter->state)) {
                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
                         " but the PF device is not up.\n");
                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
                         " attempting to use the VF device.\n");
        }
        return igb_set_vf_mac(adapter, vf, mac);
}
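
/*
 * Usage note (illustrative): an administrator typically reaches
 * igb_ndo_set_vf_mac() through iproute2, e.g.
 *
 *        ip link set eth0 vf 0 mac 00:1b:21:3a:4c:5d
 *
 * which invokes this handler via the ndo_set_vf_mac net_device op.
 */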

static int igb_link_mbps(int internal_link_speed)
{
        switch (internal_link_speed) {
        case SPEED_100:
                return 100;
        case SPEED_1000:
                return 1000;
        default:
                return 0;
        }
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
                                  int link_speed)
{
        int rf_dec, rf_int;
        u32 bcnrc_val;

        if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
                rf_int = link_speed / tx_rate;
                rf_dec = (link_speed - (rf_int * tx_rate));
                rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

                bcnrc_val = E1000_RTTBCNRC_RS_ENA;
                bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
                              E1000_RTTBCNRC_RF_INT_MASK);
                bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
        } else {
                bcnrc_val = 0;
        }

        wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
        wr32(E1000_RTTBCNRC, bcnrc_val);
}
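
/*
 * Worked example (illustrative, assuming the fractional shift of
 * E1000_RTTBCNRC_RF_INT_SHIFT is 14): with link_speed = 1000 Mbps and
 * tx_rate = 300 Mbps, rf_int = 1000 / 300 = 3 and
 * rf_dec = (1000 - 3 * 300) * 2^14 / 300 = 5461, so the hardware is
 * programmed with a rate factor of about 3 + 5461/16384 = 3.333,
 * i.e. line rate divided by the requested rate.
 */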

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
        int actual_link_speed, i;
        bool reset_rate = false;

        /* VF TX rate limit was not set or not supported */
        if ((adapter->vf_rate_link_speed == 0) ||
            (adapter->hw.mac.type != e1000_82576))
                return;

        actual_link_speed = igb_link_mbps(adapter->link_speed);
        if (actual_link_speed != adapter->vf_rate_link_speed) {
                reset_rate = true;
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
                         "Link speed has been changed. VF Transmit "
                         "rate is disabled\n");
        }

        for (i = 0; i < adapter->vfs_allocated_count; i++) {
                if (reset_rate)
                        adapter->vf_data[i].tx_rate = 0;

                igb_set_vf_rate_limit(&adapter->hw, i,
                                      adapter->vf_data[i].tx_rate,
                                      actual_link_speed);
        }
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int actual_link_speed;

        if (hw->mac.type != e1000_82576)
                return -EOPNOTSUPP;

        actual_link_speed = igb_link_mbps(adapter->link_speed);
        if ((vf >= adapter->vfs_allocated_count) ||
            (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
            (tx_rate < 0) || (tx_rate > actual_link_speed))
                return -EINVAL;

        adapter->vf_rate_link_speed = actual_link_speed;
        adapter->vf_data[vf].tx_rate = (u16)tx_rate;
        igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

        return 0;
}
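
/*
 * Usage note (illustrative): the per-VF transmit rate limit is normally
 * set from iproute2, e.g.
 *
 *        ip link set eth0 vf 0 rate 300
 *
 * with the rate in Mbps; a rate of 0 removes the limit. The request
 * reaches igb_ndo_set_vf_bw() through the driver's ndo_set_vf_tx_rate hook.
 */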

static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (vf >= adapter->vfs_allocated_count)
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
        ivi->tx_rate = adapter->vf_data[vf].tx_rate;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 reg;

        switch (hw->mac.type) {
        case e1000_82575:
        default:
                /* replication is not supported for 82575 */
                return;
        case e1000_82576:
                /* notify HW that the MAC is adding vlan tags */
                reg = rd32(E1000_DTXCTL);
                reg |= E1000_DTXCTL_VLAN_ADDED;
                wr32(E1000_DTXCTL, reg);
                /* fall through */
        case e1000_82580:
                /* enable replication vlan tag stripping */
                reg = rd32(E1000_RPLOLR);
                reg |= E1000_RPLOLR_STRVLAN;
                wr32(E1000_RPLOLR, reg);
                /* fall through */
        case e1000_i350:
                /* none of the above registers are supported by i350 */
                break;
        }

        if (adapter->vfs_allocated_count) {
                igb_vmdq_set_loopback_pf(hw, true);
                igb_vmdq_set_replication_pf(hw, true);
                igb_vmdq_set_anti_spoofing_pf(hw, true,
                                              adapter->vfs_allocated_count);
        } else {
                igb_vmdq_set_loopback_pf(hw, false);
                igb_vmdq_set_replication_pf(hw, false);
        }
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 dmac_thr;
        u16 hwm;

        if (hw->mac.type > e1000_82580) {
                if (adapter->flags & IGB_FLAG_DMAC) {
                        u32 reg;

                        /* force threshold to 0. */
                        wr32(E1000_DMCTXTH, 0);

                        /*
                         * DMA Coalescing high water mark needs to be higher
                         * than the RX threshold. set hwm to PBA - 2 * max
                         * frame size
                         */
                        hwm = pba - (2 * adapter->max_frame_size);
                        reg = rd32(E1000_DMACR);
                        reg &= ~E1000_DMACR_DMACTHR_MASK;
                        dmac_thr = pba - 4;

                        reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
                                & E1000_DMACR_DMACTHR_MASK);

                        /* transition to L0x or L1 if available */
                        reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

                        /* watchdog timer = +-1000 usec in 32 usec intervals */
                        reg |= (1000 >> 5);
                        wr32(E1000_DMACR, reg);

                        /*
                         * no lower threshold to disable
                         * coalescing (smart FIFO) - UTRESH = 0
                         */
                        wr32(E1000_DMCRTRH, 0);
                        wr32(E1000_FCRTC, hwm);

                        reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

                        wr32(E1000_DMCTLX, reg);

                        /*
                         * free space in tx packet buffer to wake from
                         * DMA coal
                         */
                        wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
                             (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

                        /*
                         * make low power state decision controlled
                         * by DMA coal
                         */
                        reg = rd32(E1000_PCIEMISC);
                        reg &= ~E1000_PCIEMISC_LX_DECISION;
                        wr32(E1000_PCIEMISC, reg);
                } /* endif adapter->dmac is not disabled */
        } else if (hw->mac.type == e1000_82580) {
                u32 reg = rd32(E1000_PCIEMISC);
                wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
                wr32(E1000_DMACR, 0);
        }
}

/* igb_main.c */