/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 0
#define BUILD 6
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
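/* given MAJ/MIN/BUILD above, DRV_VERSION expands to "3.0.6-k" */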
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
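/* MODULE_DEVICE_TABLE exports igb_pci_tbl so hotplug/modprobe can bind
 * these PCI IDs to this driver automatically */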

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = igb_suspend,
	.resume = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

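/* offset/name pairs for the registers that igb_dump() prints below */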
struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

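	/* the cases below cover per-queue registers; one instance is read
	 * for each of the first four queues */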
	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	int n = 0;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
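	/* my_u0 overlays a descriptor so both 64-bit halves can be printed raw */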
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15     0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
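/* maps i = 0,1,2,3,... to queue indices 0,8,1,9,..., matching the
 * "VF n owns queues n and n+8" layout noted in igb_cache_ring_register() */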
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
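		/* fall through - remaining rings get the linear mapping below */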
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/* 82580 uses the same table-based approach as 82576 but has
		 * fewer IVAR entries, so two queues share each entry: bit 0
		 * of the queue index selects which half is used. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

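/* undo MSI-X or MSI enablement so the interrupt mode can be renegotiated */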
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
	                                    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
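	/* itr settings of 1-3 appear to request dynamic moderation; seed such
	 * vectors with the default IGB_START_ITR rate instead */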
1113 if (q_vector->itr_val && q_vector->itr_val <= 3)
1114 q_vector->itr_val = IGB_START_ITR;
Alexander Duyck047e0032009-10-27 15:49:27 +00001115}
1116
1117static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
1118 int ring_idx, int v_idx)
1119{
Alexander Duyck3025a442010-02-17 01:02:39 +00001120 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
Alexander Duyck047e0032009-10-27 15:49:27 +00001121
Alexander Duyck3025a442010-02-17 01:02:39 +00001122 q_vector->tx_ring = adapter->tx_ring[ring_idx];
Alexander Duyck047e0032009-10-27 15:49:27 +00001123 q_vector->tx_ring->q_vector = q_vector;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001124 q_vector->itr_val = adapter->tx_itr_setting;
Alexander Duyck13fde972011-10-05 13:35:24 +00001125 q_vector->tx_work_limit = adapter->tx_work_limit;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001126 if (q_vector->itr_val && q_vector->itr_val <= 3)
1127 q_vector->itr_val = IGB_START_ITR;
Alexander Duyck047e0032009-10-27 15:49:27 +00001128}
1129
1130/**
1131 * igb_map_ring_to_vector - maps allocated queues to vectors
1132 *
1133 * This function maps the recently allocated queues to vectors.
1134 **/
1135static int igb_map_ring_to_vector(struct igb_adapter *adapter)
1136{
1137 int i;
1138 int v_idx = 0;
1139
1140 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
1141 (adapter->num_q_vectors < adapter->num_tx_queues))
1142 return -ENOMEM;
1143
1144 if (adapter->num_q_vectors >=
1145 (adapter->num_rx_queues + adapter->num_tx_queues)) {
1146 for (i = 0; i < adapter->num_rx_queues; i++)
1147 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1148 for (i = 0; i < adapter->num_tx_queues; i++)
1149 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1150 } else {
1151 for (i = 0; i < adapter->num_rx_queues; i++) {
1152 if (i < adapter->num_tx_queues)
1153 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1154 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1155 }
1156 for (; i < adapter->num_tx_queues; i++)
1157 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1158 }
1159 return 0;
1160}
1161
1162/**
1163 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1164 *
1165 * This function initializes the interrupts and allocates all of the queues.
1166 **/
1167static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1168{
1169 struct pci_dev *pdev = adapter->pdev;
1170 int err;
1171
Ben Hutchings21adef32010-09-27 08:28:39 +00001172 err = igb_set_interrupt_capability(adapter);
1173 if (err)
1174 return err;
Alexander Duyck047e0032009-10-27 15:49:27 +00001175
1176 err = igb_alloc_q_vectors(adapter);
1177 if (err) {
1178 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1179 goto err_alloc_q_vectors;
1180 }
1181
1182 err = igb_alloc_queues(adapter);
1183 if (err) {
1184 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1185 goto err_alloc_queues;
1186 }
1187
1188 err = igb_map_ring_to_vector(adapter);
1189 if (err) {
1190 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1191 goto err_map_queues;
1192 }
1193
1194
1195 return 0;
1196err_map_queues:
1197 igb_free_queues(adapter);
1198err_alloc_queues:
1199 igb_free_q_vectors(adapter);
1200err_alloc_q_vectors:
1201 igb_reset_interrupt_capability(adapter);
1202 return err;
1203}
1204
1205/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001206 * igb_request_irq - initialize interrupts
1207 *
1208 * Attempts to configure interrupts using the best available
1209 * capabilities of the hardware and kernel.
1210 **/
1211static int igb_request_irq(struct igb_adapter *adapter)
1212{
1213 struct net_device *netdev = adapter->netdev;
Alexander Duyck047e0032009-10-27 15:49:27 +00001214 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001215 int err = 0;
1216
1217 if (adapter->msix_entries) {
1218 err = igb_request_msix(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001219 if (!err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001220 goto request_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08001221 /* fall back to MSI */
Alexander Duyck047e0032009-10-27 15:49:27 +00001222 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001223 if (!pci_enable_msi(adapter->pdev))
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001224 adapter->flags |= IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001225 igb_free_all_tx_resources(adapter);
1226 igb_free_all_rx_resources(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00001227 adapter->num_tx_queues = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08001228 adapter->num_rx_queues = 1;
Alexander Duyck047e0032009-10-27 15:49:27 +00001229 adapter->num_q_vectors = 1;
1230 err = igb_alloc_q_vectors(adapter);
1231 if (err) {
1232 dev_err(&pdev->dev,
1233 "Unable to allocate memory for vectors\n");
1234 goto request_done;
1235 }
1236 err = igb_alloc_queues(adapter);
1237 if (err) {
1238 dev_err(&pdev->dev,
1239 "Unable to allocate memory for queues\n");
1240 igb_free_q_vectors(adapter);
1241 goto request_done;
1242 }
1243 igb_setup_all_tx_resources(adapter);
1244 igb_setup_all_rx_resources(adapter);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001245 } else {
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001246 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001247 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001248
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001249 if (adapter->flags & IGB_FLAG_HAS_MSI) {
Joe Perchesa0607fd2009-11-18 23:29:17 -08001250 err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
Alexander Duyck047e0032009-10-27 15:49:27 +00001251 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001252 if (!err)
1253 goto request_done;
Alexander Duyck047e0032009-10-27 15:49:27 +00001254
Auke Kok9d5c8242008-01-24 02:22:38 -08001255 /* fall back to legacy interrupts */
1256 igb_reset_interrupt_capability(adapter);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07001257 adapter->flags &= ~IGB_FLAG_HAS_MSI;
Auke Kok9d5c8242008-01-24 02:22:38 -08001258 }
1259
Joe Perchesa0607fd2009-11-18 23:29:17 -08001260 err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
Alexander Duyck047e0032009-10-27 15:49:27 +00001261 netdev->name, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001262
Andy Gospodarek6cb5e572008-02-15 14:05:25 -08001263 if (err)
Auke Kok9d5c8242008-01-24 02:22:38 -08001264 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
1265 err);
Auke Kok9d5c8242008-01-24 02:22:38 -08001266
1267request_done:
1268 return err;
1269}
1270
1271static void igb_free_irq(struct igb_adapter *adapter)
1272{
Auke Kok9d5c8242008-01-24 02:22:38 -08001273 if (adapter->msix_entries) {
1274 int vector = 0, i;
1275
Alexander Duyck047e0032009-10-27 15:49:27 +00001276 free_irq(adapter->msix_entries[vector++].vector, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001277
Alexander Duyck047e0032009-10-27 15:49:27 +00001278 for (i = 0; i < adapter->num_q_vectors; i++) {
1279 struct igb_q_vector *q_vector = adapter->q_vector[i];
1280 free_irq(adapter->msix_entries[vector++].vector,
1281 q_vector);
1282 }
1283 } else {
1284 free_irq(adapter->pdev->irq, adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001285 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001286}
1287
1288/**
1289 * igb_irq_disable - Mask off interrupt generation on the NIC
1290 * @adapter: board private structure
1291 **/
1292static void igb_irq_disable(struct igb_adapter *adapter)
1293{
1294 struct e1000_hw *hw = &adapter->hw;
1295
Alexander Duyck25568a52009-10-27 23:49:59 +00001296 /*
1297 * we need to be careful when disabling interrupts. The VFs are also
1298 * mapped into these registers and so clearing the bits can cause
1299 * issues on the VF drivers so we only need to clear what we set
1300 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

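	/* clear the interrupt auto-mask and mask all interrupt causes,
	 * then flush the posted writes */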
	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
1320
1321/**
1322 * igb_irq_enable - Enable default interrupt generation settings
1323 * @adapter: board private structure
1324 **/
1325static void igb_irq_enable(struct igb_adapter *adapter)
1326{
1327 struct e1000_hw *hw = &adapter->hw;
1328
1329 if (adapter->msix_entries) {
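		/* turn on auto-clear (EIAC) and auto-mask (EIAM) for the
		 * queue vectors, then unmask them via EIMS */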
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
		     E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
		     E1000_IMS_DRSTA);
	}
}

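/**
 * igb_update_mng_vlan - update the management VLAN filter entry
 * @adapter: board private structure
 *
 * Keeps the VLAN id carried in the firmware's DHCP cookie present in the
 * VLAN filter table, and drops the previous id once it is no longer in
 * use by any active VLAN.
 **/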
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused, which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware, DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for MTUs greater than 9K.
	 * CTRL.RST must be asserted for the change to take effect.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * packet, but we don't include the Ethernet FCS because
		 * hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus two full frames */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");
	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/*
			 * DMA Coalescing high water mark needs to be higher
			 * than the Rx threshold.  The Rx threshold is
			 * currently pba - 6, so we should use a high water
			 * mark of pba - 4.
			 */
			hwm = (pba - 4) << 10;

			reg = (((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
			       & E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = 1000 usec, expressed in 32-usec
			 * intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable coalescing
			 * (smart fifo); DMCRTRH.UTRESH = 0 */
			wr32(E1000_DMCRTRH, 0);

			/* write the high water mark calculated above */
			wr32(E1000_FCRTC, hwm);

			/*
			 * This sets the time to wait before requesting a
			 * transition to low power state to the number of
			 * usecs needed to receive 1 512-byte frame at
			 * gigabit line rate.
			 */
			reg = rd32(E1000_DMCTLX);
			reg |= IGB_DMCTLX_DCFLUSH_DIS;

			/* Delay 255 usec before entering Lx state. */
			reg |= 0xFF;
			wr32(E1000_DMCTLX, reg);

			/* free space in Tx packet buffer to wake from DMAC */
			wr32(E1000_DMCTXTH,
			     (IGB_MIN_TXPBSIZE -
			      (IGB_TX_BUF_4096 + adapter->max_frame_size))
			     >> 6);

			/* make low power state decision controlled by DMAC */
			reg = rd32(E1000_PCIEMISC);
			reg |= E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* end if IGB_FLAG_DMAC set */
	}
	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

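/**
 * igb_fix_features - sanitize feature flags requested through ethtool
 * @netdev: network interface device structure
 * @features: the feature set requested by user space
 **/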
static u32 igb_fix_features(struct net_device *netdev, u32 features)
{
	/*
	 * Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable, make sure the Tx flag is always in the same
	 * state as the Rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

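/**
 * igb_set_features - apply the changed offload features to the rings
 * @netdev: network interface device structure
 * @features: the new feature set
 **/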
static int igb_set_features(struct net_device *netdev, u32 features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 changed = netdev->features ^ features;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (features & NETIF_F_RXCSUM)
			adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
		else
			adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
	}

	if (changed & NETIF_F_HW_VLAN_RX)
		igb_vlan_mode(netdev, features);

	return 0;
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

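	/* prefer a 64-bit DMA mask, falling back to 32-bit when the
	 * platform cannot address the full range */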
	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

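	/* offloads listed in hw_features may be toggled at runtime
	 * through ethtool */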
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_VLAN_RX;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting.  If APM wake is enabled in the
	 * EEPROM, enable the ACPI Magic Packet filter */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	igb_vlan_mode(netdev, netdev->features);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		  "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		  "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may reschedule itself, so explicitly
	 * prevent it from being rescheduled while we tear down.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is that it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
					   sizeof(struct vf_data_storage),
					   GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
				"Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;
		dev_info(&pdev->dev, "%d vfs allocated\n",
			 adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
		/* DMA Coalescing is not supported in IOV mode. */
		if (adapter->flags & IGB_FLAG_DMAC)
			adapter->flags &= ~IGB_FLAG_DMAC;
	}
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync advances the system timer by 8ns every
		 * 8ns, and that increment cannot be shifted.  Instead we need
		 * to shift the registers to generate a 64bit timer value.  As
		 * a result SYSTIMR/L/H, TXSTMPL/H and RXSTMPL/H all have to
		 * be shifted by 24 in order to generate a larger value for
		 * synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock.  NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick.  The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits.  As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock.  NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

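	/* frame size limits: current MTU plus the Ethernet header, FCS,
	 * and a single VLAN tag */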
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4, or VFs are going to be allocated alongside
	 * multiple RSS queues, combine the queues into queue pairs in
	 * order to conserve the limited supply of interrupt vectors
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to set up our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

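	/* allocate one software state entry per hardware descriptor */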
	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

2635/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002636 * igb_setup_tctl - configure the transmit control registers
2637 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002638 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002639void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002640{
Auke Kok9d5c8242008-01-24 02:22:38 -08002641 struct e1000_hw *hw = &adapter->hw;
2642 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002643
Alexander Duyck85b430b2009-10-27 15:50:29 +00002644 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2645 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002646
2647 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002648 tctl = rd32(E1000_TCTL);
2649 tctl &= ~E1000_TCTL_CT;
2650 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2651 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2652
2653 igb_config_collision_dist(hw);
2654
Auke Kok9d5c8242008-01-24 02:22:38 -08002655 /* Enable transmits */
2656 tctl |= E1000_TCTL_EN;
2657
2658 wr32(E1000_TCTL, tctl);
2659}
2660
2661/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002662 * igb_configure_tx_ring - Configure transmit ring after Reset
2663 * @adapter: board private structure
2664 * @ring: tx ring to configure
2665 *
2666 * Configure a transmit ring after a reset.
2667 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002668void igb_configure_tx_ring(struct igb_adapter *adapter,
2669 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002670{
2671 struct e1000_hw *hw = &adapter->hw;
Alexander Duycka74420e2011-08-26 07:43:27 +00002672 u32 txdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002673 u64 tdba = ring->dma;
2674 int reg_idx = ring->reg_idx;
2675
2676 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00002677 wr32(E1000_TXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002678 wrfl();
2679 mdelay(10);
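	/* Note: the wrfl() and 10 msec delay above should let any in-flight
	 * descriptor fetches for this queue drain before the ring is
	 * reprogrammed (assumption: 10 msec is ample at any link speed).
	 */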
2680
2681 wr32(E1000_TDLEN(reg_idx),
2682 ring->count * sizeof(union e1000_adv_tx_desc));
2683 wr32(E1000_TDBAL(reg_idx),
2684 tdba & 0x00000000ffffffffULL);
2685 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2686
Alexander Duyckfce99e32009-10-27 15:51:27 +00002687 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00002688 wr32(E1000_TDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00002689 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002690
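	/* TXDCTL packs three thresholds: prefetch in the low bits, host
	 * at bit 8 and write-back at bit 16 (hence the shifts below), per
	 * the 82575/82576 datasheet register layout.
	 */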
2691 txdctl |= IGB_TX_PTHRESH;
2692 txdctl |= IGB_TX_HTHRESH << 8;
2693 txdctl |= IGB_TX_WTHRESH << 16;
2694
2695 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2696 wr32(E1000_TXDCTL(reg_idx), txdctl);
2697}
2698
2699/**
2700 * igb_configure_tx - Configure transmit Unit after Reset
2701 * @adapter: board private structure
2702 *
2703 * Configure the Tx unit of the MAC after a reset.
2704 **/
2705static void igb_configure_tx(struct igb_adapter *adapter)
2706{
2707 int i;
2708
2709 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002710 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002711}
2712
2713/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002714 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002715 * @rx_ring: rx descriptor ring (for a specific queue) to set up
2716 *
2717 * Returns 0 on success, negative on failure
2718 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002719int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002720{
Alexander Duyck59d71982010-04-27 13:09:25 +00002721 struct device *dev = rx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002722 int size, desc_len;
2723
Alexander Duyck06034642011-08-26 07:44:22 +00002724 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
2725 rx_ring->rx_buffer_info = vzalloc(size);
2726 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08002727 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002728
2729 desc_len = sizeof(union e1000_adv_rx_desc);
2730
2731 /* Round up to nearest 4K */
2732 rx_ring->size = rx_ring->count * desc_len;
2733 rx_ring->size = ALIGN(rx_ring->size, 4096);
2734
Alexander Duyck59d71982010-04-27 13:09:25 +00002735 rx_ring->desc = dma_alloc_coherent(dev,
2736 rx_ring->size,
2737 &rx_ring->dma,
2738 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002739
2740 if (!rx_ring->desc)
2741 goto err;
2742
2743 rx_ring->next_to_clean = 0;
2744 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002745
Auke Kok9d5c8242008-01-24 02:22:38 -08002746 return 0;
2747
2748err:
Alexander Duyck06034642011-08-26 07:44:22 +00002749 vfree(rx_ring->rx_buffer_info);
2750 rx_ring->rx_buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002751	dev_err(dev, "Unable to allocate memory for the receive descriptor ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002753 return -ENOMEM;
2754}
2755
2756/**
2757 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2758 * (Descriptors) for all queues
2759 * @adapter: board private structure
2760 *
2761 * Return 0 on success, negative on failure
2762 **/
2763static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2764{
Alexander Duyck439705e2009-10-27 23:49:20 +00002765 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002766 int i, err = 0;
2767
2768 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002769 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002770 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002771 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002772 "Allocation for Rx Queue %u failed\n", i);
2773 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002774 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002775 break;
2776 }
2777 }
2778
2779 return err;
2780}
2781
2782/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002783 * igb_setup_mrqc - configure the multiple receive queue control registers
2784 * @adapter: Board private structure
2785 **/
2786static void igb_setup_mrqc(struct igb_adapter *adapter)
2787{
2788 struct e1000_hw *hw = &adapter->hw;
2789 u32 mrqc, rxcsum;
2790 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2791 union e1000_reta {
2792 u32 dword;
2793 u8 bytes[4];
2794 } reta;
2795 static const u8 rsshash[40] = {
2796 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2797 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2798 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2799 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2800
2801 /* Fill out hash function seeds */
2802 for (j = 0; j < 10; j++) {
2803 u32 rsskey = rsshash[(j * 4)];
2804 rsskey |= rsshash[(j * 4) + 1] << 8;
2805 rsskey |= rsshash[(j * 4) + 2] << 16;
2806 rsskey |= rsshash[(j * 4) + 3] << 24;
2807 array_wr32(E1000_RSSRK(0), j, rsskey);
2808 }
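	/* i.e. the 40-byte RSS key is handed to hardware as ten
	 * little-endian dwords: RSSRK(0) holds rsshash[0..3] with
	 * rsshash[0] in bits 7:0, and so on through RSSRK(9).
	 */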
2809
Alexander Duycka99955f2009-11-12 18:37:19 +00002810 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002811
2812 if (adapter->vfs_allocated_count) {
2813 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2814 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002815 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002816 case e1000_82580:
2817 num_rx_queues = 1;
2818 shift = 0;
2819 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002820 case e1000_82576:
2821 shift = 3;
2822 num_rx_queues = 2;
2823 break;
2824 case e1000_82575:
2825 shift = 2;
2826 shift2 = 6;
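			/* fall through */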
2827 default:
2828 break;
2829 }
2830 } else {
2831 if (hw->mac.type == e1000_82575)
2832 shift = 6;
2833 }
2834
2835 for (j = 0; j < (32 * 4); j++) {
2836 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2837 if (shift2)
2838 reta.bytes[j & 3] |= num_rx_queues << shift2;
2839 if ((j & 3) == 3)
2840 wr32(E1000_RETA(j >> 2), reta.dword);
2841 }
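	/* Worked example: with adapter->rss_queues == 4 and shift == 0 the
	 * 128 redirection entries cycle 0,1,2,3,0,1,... so the RSS hash
	 * spreads incoming flows evenly across the four rings.
	 */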
2842
2843 /*
2844 * Disable raw packet checksumming so that RSS hash is placed in
2845 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2846 * offloads as they are enabled by default
2847 */
2848 rxcsum = rd32(E1000_RXCSUM);
2849 rxcsum |= E1000_RXCSUM_PCSD;
2850
2851 if (adapter->hw.mac.type >= e1000_82576)
2852 /* Enable Receive Checksum Offload for SCTP */
2853 rxcsum |= E1000_RXCSUM_CRCOFL;
2854
2855 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2856 wr32(E1000_RXCSUM, rxcsum);
2857
2858 /* If VMDq is enabled then we set the appropriate mode for that, else
2859 * we default to RSS so that an RSS hash is calculated per packet even
2860 * if we are only using one queue */
2861 if (adapter->vfs_allocated_count) {
2862 if (hw->mac.type > e1000_82575) {
2863 /* Set the default pool for the PF's first queue */
2864 u32 vtctl = rd32(E1000_VT_CTL);
2865 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2866 E1000_VT_CTL_DISABLE_DEF_POOL);
2867 vtctl |= adapter->vfs_allocated_count <<
2868 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2869 wr32(E1000_VT_CTL, vtctl);
2870 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002871 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002872 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2873 else
2874 mrqc = E1000_MRQC_ENABLE_VMDQ;
2875 } else {
2876 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2877 }
2878 igb_vmm_control(adapter);
2879
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002880 /*
2881 * Generate RSS hash based on TCP port numbers and/or
2882 * IPv4/v6 src and dst addresses since UDP cannot be
2883 * hashed reliably due to IP fragmentation
2884 */
2885 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2886 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2887 E1000_MRQC_RSS_FIELD_IPV6 |
2888 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2889 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002890
2891 wr32(E1000_MRQC, mrqc);
2892}
2893
2894/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002895 * igb_setup_rctl - configure the receive control registers
2896 * @adapter: Board private structure
2897 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002898void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002899{
2900 struct e1000_hw *hw = &adapter->hw;
2901 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002902
2903 rctl = rd32(E1000_RCTL);
2904
2905 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002906 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002907
Alexander Duyck69d728b2008-11-25 01:04:03 -08002908 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002909 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002910
Auke Kok87cb7e82008-07-08 15:08:29 -07002911 /*
2912 * enable stripping of CRC. It's unlikely this will break BMC
2913 * redirection as it did with e1000. Newer features require
2914 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002915 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002916 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002917
Alexander Duyck559e9c42009-10-27 23:52:50 +00002918 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002919 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002920
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002921 /* enable LPE to prevent packets larger than max_frame_size */
2922 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002923
Alexander Duyck952f72a2009-10-27 15:51:07 +00002924 /* disable queue 0 to prevent tail write w/o re-config */
2925 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002926
Alexander Duycke1739522009-02-19 20:39:44 -08002927 /* Attention!!! For SR-IOV PF driver operations you must enable
2928 * queue drop for all VF and PF queues to prevent head of line blocking
2929 * if an un-trusted VF does not provide descriptors to hardware.
2930 */
2931 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002932 /* set all queue drop enable bits */
2933 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002934 }
2935
Auke Kok9d5c8242008-01-24 02:22:38 -08002936 wr32(E1000_RCTL, rctl);
2937}
2938
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002939static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2940 int vfn)
2941{
2942 struct e1000_hw *hw = &adapter->hw;
2943 u32 vmolr;
2944
2945 /* if it isn't the PF check to see if VFs are enabled and
2946 * increase the size to support vlan tags */
2947 if (vfn < adapter->vfs_allocated_count &&
2948 adapter->vf_data[vfn].vlans_enabled)
2949 size += VLAN_TAG_SIZE;
2950
2951 vmolr = rd32(E1000_VMOLR(vfn));
2952 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2953 vmolr |= size | E1000_VMOLR_LPE;
2954 wr32(E1000_VMOLR(vfn), vmolr);
2955
2956 return 0;
2957}
2958
Auke Kok9d5c8242008-01-24 02:22:38 -08002959/**
Alexander Duycke1739522009-02-19 20:39:44 -08002960 * igb_rlpml_set - set maximum receive packet size
2961 * @adapter: board private structure
2962 *
2963 * Configure maximum receivable packet size.
2964 **/
2965static void igb_rlpml_set(struct igb_adapter *adapter)
2966{
Alexander Duyck153285f2011-08-26 07:43:32 +00002967 u32 max_frame_size = adapter->max_frame_size;
Alexander Duycke1739522009-02-19 20:39:44 -08002968 struct e1000_hw *hw = &adapter->hw;
2969 u16 pf_id = adapter->vfs_allocated_count;
2970
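	/* the PF's pool index follows the last VF pool, so
	 * vfs_allocated_count doubles as the PF pool id here
	 */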
Alexander Duycke1739522009-02-19 20:39:44 -08002971 if (pf_id) {
2972 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck153285f2011-08-26 07:43:32 +00002973 /*
2974 * If we're in VMDQ or SR-IOV mode, then set global RLPML
2975 * to our max jumbo frame size, in case we need to enable
2976 * jumbo frames on one of the rings later.
2977 * This will not pass over-length frames into the default
2978 * queue because it's gated by the VMOLR.RLPML.
2979 */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002980 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002981 }
2982
2983 wr32(E1000_RLPML, max_frame_size);
2984}
2985
Williams, Mitch A8151d292010-02-10 01:44:24 +00002986static inline void igb_set_vmolr(struct igb_adapter *adapter,
2987 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002988{
2989 struct e1000_hw *hw = &adapter->hw;
2990 u32 vmolr;
2991
2992 /*
2993 * This register exists only on 82576 and newer so if we are older then
2994 * we should exit and do nothing
2995 */
2996 if (hw->mac.type < e1000_82576)
2997 return;
2998
2999 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00003000 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3001 if (aupe)
3002 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3003 else
3004 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003005
3006 /* clear all bits that might not be set */
3007 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3008
Alexander Duycka99955f2009-11-12 18:37:19 +00003009 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003010 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3011 /*
3012 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3013 * multicast packets
3014 */
3015 if (vfn <= adapter->vfs_allocated_count)
3016 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3017
3018 wr32(E1000_VMOLR(vfn), vmolr);
3019}
3020
Alexander Duycke1739522009-02-19 20:39:44 -08003021/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00003022 * igb_configure_rx_ring - Configure a receive ring after Reset
3023 * @adapter: board private structure
3024 * @ring: receive ring to be configured
3025 *
3026 * Configure the Rx unit of the MAC after a reset.
3027 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00003028void igb_configure_rx_ring(struct igb_adapter *adapter,
3029 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00003030{
3031 struct e1000_hw *hw = &adapter->hw;
3032 u64 rdba = ring->dma;
3033 int reg_idx = ring->reg_idx;
Alexander Duycka74420e2011-08-26 07:43:27 +00003034 u32 srrctl = 0, rxdctl = 0;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003035
3036 /* disable the queue */
Alexander Duycka74420e2011-08-26 07:43:27 +00003037 wr32(E1000_RXDCTL(reg_idx), 0);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003038
3039 /* Set DMA base address registers */
3040 wr32(E1000_RDBAL(reg_idx),
3041 rdba & 0x00000000ffffffffULL);
3042 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3043 wr32(E1000_RDLEN(reg_idx),
3044 ring->count * sizeof(union e1000_adv_rx_desc));
3045
3046 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00003047 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
Alexander Duycka74420e2011-08-26 07:43:27 +00003048 wr32(E1000_RDH(reg_idx), 0);
Alexander Duyckfce99e32009-10-27 15:51:27 +00003049 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003050
Alexander Duyck952f72a2009-10-27 15:51:07 +00003051 /* set descriptor configuration */
Alexander Duyck44390ca2011-08-26 07:43:38 +00003052 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003053#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
Alexander Duyck44390ca2011-08-26 07:43:38 +00003054 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003055#else
Alexander Duyck44390ca2011-08-26 07:43:38 +00003056 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003057#endif
Alexander Duyck44390ca2011-08-26 07:43:38 +00003058 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
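	/* SRRCTL expects the header buffer size in 64-byte units and the
	 * packet buffer size in 1 KB units (82575/82576 datasheet layout);
	 * the BSIZEHDRSIZE/BSIZEPKT shifts above do those conversions.
	 */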
Nick Nunley757b77e2010-03-26 11:36:47 +00003059 if (hw->mac.type == e1000_82580)
3060 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003061 /* Only set Drop Enable if we are supporting multiple queues */
3062 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3063 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003064
3065 wr32(E1000_SRRCTL(reg_idx), srrctl);
3066
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003067 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003068 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003069
Alexander Duyck85b430b2009-10-27 15:50:29 +00003070 rxdctl |= IGB_RX_PTHRESH;
3071 rxdctl |= IGB_RX_HTHRESH << 8;
3072 rxdctl |= IGB_RX_WTHRESH << 16;
Alexander Duycka74420e2011-08-26 07:43:27 +00003073
3074 /* enable receive descriptor fetching */
3075 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
Alexander Duyck85b430b2009-10-27 15:50:29 +00003076 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3077}
3078
3079/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003080 * igb_configure_rx - Configure receive Unit after Reset
3081 * @adapter: board private structure
3082 *
3083 * Configure the Rx unit of the MAC after a reset.
3084 **/
3085static void igb_configure_rx(struct igb_adapter *adapter)
3086{
Hannes Eder91075842009-02-18 19:36:04 -08003087 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003088
Alexander Duyck68d480c2009-10-05 06:33:08 +00003089 /* set UTA to appropriate mode */
3090 igb_set_uta(adapter);
3091
Alexander Duyck26ad9172009-10-05 06:32:49 +00003092 /* set the correct pool for the PF default MAC address in entry 0 */
3093 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3094 adapter->vfs_allocated_count);
3095
Alexander Duyck06cf2662009-10-27 15:53:25 +00003096 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3097 * the Base and Length of the Rx Descriptor Ring */
3098 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003099 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003100}
3101
3102/**
3103 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003104 * @tx_ring: Tx descriptor ring for a specific queue
3105 *
3106 * Free all transmit software resources
3107 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003108void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003109{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003110 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003111
Alexander Duyck06034642011-08-26 07:44:22 +00003112 vfree(tx_ring->tx_buffer_info);
3113 tx_ring->tx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003114
Alexander Duyck439705e2009-10-27 23:49:20 +00003115 /* if not set, then don't free */
3116 if (!tx_ring->desc)
3117 return;
3118
Alexander Duyck59d71982010-04-27 13:09:25 +00003119 dma_free_coherent(tx_ring->dev, tx_ring->size,
3120 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003121
3122 tx_ring->desc = NULL;
3123}
3124
3125/**
3126 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3127 * @adapter: board private structure
3128 *
3129 * Free all transmit software resources
3130 **/
3131static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3132{
3133 int i;
3134
3135 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003136 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003137}
3138
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003139void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
Alexander Duyck06034642011-08-26 07:44:22 +00003140 struct igb_tx_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003141{
Alexander Duyck6366ad32009-12-02 16:47:18 +00003142 if (buffer_info->dma) {
3143 if (buffer_info->mapped_as_page)
Alexander Duyck59d71982010-04-27 13:09:25 +00003144 dma_unmap_page(tx_ring->dev,
Alexander Duyck6366ad32009-12-02 16:47:18 +00003145 buffer_info->dma,
3146 buffer_info->length,
Alexander Duyck59d71982010-04-27 13:09:25 +00003147 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003148 else
Alexander Duyck59d71982010-04-27 13:09:25 +00003149 dma_unmap_single(tx_ring->dev,
Alexander Duyck6366ad32009-12-02 16:47:18 +00003150 buffer_info->dma,
3151 buffer_info->length,
Alexander Duyck59d71982010-04-27 13:09:25 +00003152 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003153 buffer_info->dma = 0;
3154 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003155 if (buffer_info->skb) {
3156 dev_kfree_skb_any(buffer_info->skb);
3157 buffer_info->skb = NULL;
3158 }
3159 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003160 buffer_info->length = 0;
3161 buffer_info->next_to_watch = 0;
3162 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08003163}
3164
3165/**
3166 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003167 * @tx_ring: ring to be cleaned
3168 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003169static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003170{
Alexander Duyck06034642011-08-26 07:44:22 +00003171 struct igb_tx_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003172 unsigned long size;
3173 unsigned int i;
3174
Alexander Duyck06034642011-08-26 07:44:22 +00003175 if (!tx_ring->tx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003176 return;
 3177
 3178	/* Free all the Tx ring sk_buffs */
3179 for (i = 0; i < tx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003180 buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003181 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003182 }
3183
Alexander Duyck06034642011-08-26 07:44:22 +00003184 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3185 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003186
3187 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003188 memset(tx_ring->desc, 0, tx_ring->size);
3189
3190 tx_ring->next_to_use = 0;
3191 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003192}
3193
3194/**
3195 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3196 * @adapter: board private structure
3197 **/
3198static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3199{
3200 int i;
3201
3202 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003203 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003204}
3205
3206/**
3207 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003208 * @rx_ring: ring to clean the resources from
3209 *
3210 * Free all receive software resources
3211 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003212void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003213{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003214 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003215
Alexander Duyck06034642011-08-26 07:44:22 +00003216 vfree(rx_ring->rx_buffer_info);
3217 rx_ring->rx_buffer_info = NULL;
Auke Kok9d5c8242008-01-24 02:22:38 -08003218
Alexander Duyck439705e2009-10-27 23:49:20 +00003219 /* if not set, then don't free */
3220 if (!rx_ring->desc)
3221 return;
3222
Alexander Duyck59d71982010-04-27 13:09:25 +00003223 dma_free_coherent(rx_ring->dev, rx_ring->size,
3224 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003225
3226 rx_ring->desc = NULL;
3227}
3228
3229/**
3230 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3231 * @adapter: board private structure
3232 *
3233 * Free all receive software resources
3234 **/
3235static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3236{
3237 int i;
3238
3239 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003240 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003241}
3242
3243/**
3244 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003245 * @rx_ring: ring to free buffers from
3246 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003247static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003248{
Auke Kok9d5c8242008-01-24 02:22:38 -08003249 unsigned long size;
Alexander Duyckc023cd82011-08-26 07:43:43 +00003250 u16 i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003251
Alexander Duyck06034642011-08-26 07:44:22 +00003252 if (!rx_ring->rx_buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003253 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003254
Auke Kok9d5c8242008-01-24 02:22:38 -08003255 /* Free all the Rx ring sk_buffs */
3256 for (i = 0; i < rx_ring->count; i++) {
Alexander Duyck06034642011-08-26 07:44:22 +00003257 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08003258 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003259 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003260 buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00003261 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00003262 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003263 buffer_info->dma = 0;
3264 }
3265
3266 if (buffer_info->skb) {
3267 dev_kfree_skb(buffer_info->skb);
3268 buffer_info->skb = NULL;
3269 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003270 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003271 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003272 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003273 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003274 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003275 buffer_info->page_dma = 0;
3276 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003277 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003278 put_page(buffer_info->page);
3279 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003280 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003281 }
3282 }
3283
Alexander Duyck06034642011-08-26 07:44:22 +00003284 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3285 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08003286
3287 /* Zero out the descriptor ring */
3288 memset(rx_ring->desc, 0, rx_ring->size);
3289
3290 rx_ring->next_to_clean = 0;
3291 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003292}
3293
3294/**
3295 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3296 * @adapter: board private structure
3297 **/
3298static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3299{
3300 int i;
3301
3302 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003303 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003304}
3305
3306/**
3307 * igb_set_mac - Change the Ethernet Address of the NIC
3308 * @netdev: network interface device structure
3309 * @p: pointer to an address structure
3310 *
3311 * Returns 0 on success, negative on failure
3312 **/
3313static int igb_set_mac(struct net_device *netdev, void *p)
3314{
3315 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003316 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003317 struct sockaddr *addr = p;
3318
3319 if (!is_valid_ether_addr(addr->sa_data))
3320 return -EADDRNOTAVAIL;
3321
3322 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003323 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003324
Alexander Duyck26ad9172009-10-05 06:32:49 +00003325 /* set the correct pool for the new PF MAC address in entry 0 */
3326 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3327 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003328
Auke Kok9d5c8242008-01-24 02:22:38 -08003329 return 0;
3330}
3331
3332/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003333 * igb_write_mc_addr_list - write multicast addresses to MTA
3334 * @netdev: network interface device structure
3335 *
3336 * Writes multicast address list to the MTA hash table.
3337 * Returns: -ENOMEM on failure
3338 * 0 on no addresses written
3339 * X on writing X addresses to MTA
3340 **/
3341static int igb_write_mc_addr_list(struct net_device *netdev)
3342{
3343 struct igb_adapter *adapter = netdev_priv(netdev);
3344 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003345 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003346 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003347 int i;
3348
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003349 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003350 /* nothing to program, so clear mc list */
3351 igb_update_mc_addr_list(hw, NULL, 0);
3352 igb_restore_vf_multicasts(adapter);
3353 return 0;
3354 }
3355
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003356	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003357 if (!mta_list)
3358 return -ENOMEM;
3359
Alexander Duyck68d480c2009-10-05 06:33:08 +00003360 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003361 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003362 netdev_for_each_mc_addr(ha, netdev)
3363 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003364
Alexander Duyck68d480c2009-10-05 06:33:08 +00003365 igb_update_mc_addr_list(hw, mta_list, i);
3366 kfree(mta_list);
3367
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003368 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003369}
3370
3371/**
3372 * igb_write_uc_addr_list - write unicast addresses to RAR table
3373 * @netdev: network interface device structure
3374 *
3375 * Writes unicast address list to the RAR table.
3376 * Returns: -ENOMEM on failure/insufficient address space
3377 * 0 on no addresses written
3378 * X on writing X addresses to the RAR table
3379 **/
3380static int igb_write_uc_addr_list(struct net_device *netdev)
3381{
3382 struct igb_adapter *adapter = netdev_priv(netdev);
3383 struct e1000_hw *hw = &adapter->hw;
3384 unsigned int vfn = adapter->vfs_allocated_count;
3385 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3386 int count = 0;
3387
3388 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003389 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003390 return -ENOMEM;
3391
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003392 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003393 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003394
3395 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003396 if (!rar_entries)
3397 break;
3398 igb_rar_set_qsel(adapter, ha->addr,
3399 rar_entries--,
3400 vfn);
3401 count++;
3402 }
3403 }
3404 /* write the addresses in reverse order to avoid write combining */
3405 for (; rar_entries > 0 ; rar_entries--) {
3406 wr32(E1000_RAH(rar_entries), 0);
3407 wr32(E1000_RAL(rar_entries), 0);
3408 }
3409 wrfl();
3410
3411 return count;
3412}
3413
3414/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003415 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003416 * @netdev: network interface device structure
3417 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003418 * The set_rx_mode entry point is called whenever the unicast or multicast
3419 * address lists or the network interface flags are updated. This routine is
3420 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003421 * promiscuous mode, and all-multi behavior.
3422 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003423static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003424{
3425 struct igb_adapter *adapter = netdev_priv(netdev);
3426 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003427 unsigned int vfn = adapter->vfs_allocated_count;
3428 u32 rctl, vmolr = 0;
3429 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003430
3431 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003432 rctl = rd32(E1000_RCTL);
3433
Alexander Duyck68d480c2009-10-05 06:33:08 +00003434	/* clear the affected bits */
3435 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3436
Patrick McHardy746b9f02008-07-16 20:15:45 -07003437 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003438 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003439 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003440 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003441 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003442 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003443 vmolr |= E1000_VMOLR_MPME;
3444 } else {
3445 /*
3446 * Write addresses to the MTA, if the attempt fails
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003447 * then we should just turn on promiscuous mode so
Alexander Duyck68d480c2009-10-05 06:33:08 +00003448 * that we can at least receive multicast traffic
3449 */
3450 count = igb_write_mc_addr_list(netdev);
3451 if (count < 0) {
3452 rctl |= E1000_RCTL_MPE;
3453 vmolr |= E1000_VMOLR_MPME;
3454 } else if (count) {
3455 vmolr |= E1000_VMOLR_ROMPE;
3456 }
3457 }
3458 /*
3459 * Write addresses to available RAR registers, if there is not
3460 * sufficient space to store all the addresses then enable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003461 * unicast promiscuous mode
Alexander Duyck68d480c2009-10-05 06:33:08 +00003462 */
3463 count = igb_write_uc_addr_list(netdev);
3464 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003465 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003466 vmolr |= E1000_VMOLR_ROPE;
3467 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003468 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003469 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003470 wr32(E1000_RCTL, rctl);
3471
Alexander Duyck68d480c2009-10-05 06:33:08 +00003472 /*
3473 * In order to support SR-IOV and eventually VMDq it is necessary to set
3474 * the VMOLR to enable the appropriate modes. Without this workaround
3475 * we will have issues with VLAN tag stripping not being done for frames
3476 * that are only arriving because we are the default pool
3477 */
3478 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003479 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003480
Alexander Duyck68d480c2009-10-05 06:33:08 +00003481 vmolr |= rd32(E1000_VMOLR(vfn)) &
3482 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3483 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003484 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003485}
3486
Greg Rose13800462010-11-06 02:08:26 +00003487static void igb_check_wvbr(struct igb_adapter *adapter)
3488{
3489 struct e1000_hw *hw = &adapter->hw;
3490 u32 wvbr = 0;
3491
3492 switch (hw->mac.type) {
3493 case e1000_82576:
3494 case e1000_i350:
 3495		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
3497 break;
3498 default:
3499 break;
3500 }
3501
3502 adapter->wvbr |= wvbr;
3503}
3504
3505#define IGB_STAGGERED_QUEUE_OFFSET 8
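/* WVBR appears to report two spoof-event bits per VF: bit j for the VF's
 * first queue and bit (j + IGB_STAGGERED_QUEUE_OFFSET) for its staggered
 * second queue on 82576/i350, hence the checks in igb_spoof_check().
 */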
3506
3507static void igb_spoof_check(struct igb_adapter *adapter)
3508{
3509 int j;
3510
3511 if (!adapter->wvbr)
3512 return;
3513
3514 for(j = 0; j < adapter->vfs_allocated_count; j++) {
3515 if (adapter->wvbr & (1 << j) ||
3516 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3517 dev_warn(&adapter->pdev->dev,
3518 "Spoof event(s) detected on VF %d\n", j);
3519 adapter->wvbr &=
3520 ~((1 << j) |
3521 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3522 }
3523 }
3524}
3525
Auke Kok9d5c8242008-01-24 02:22:38 -08003526/* Need to wait a few seconds after link up to get diagnostic information from
3527 * the phy */
3528static void igb_update_phy_info(unsigned long data)
3529{
3530 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003531 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003532}
3533
3534/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003535 * igb_has_link - check shared code for link and determine up/down
3536 * @adapter: pointer to driver private info
3537 **/
Nick Nunley31455352010-02-17 01:01:21 +00003538bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003539{
3540 struct e1000_hw *hw = &adapter->hw;
3541 bool link_active = false;
3542 s32 ret_val = 0;
3543
3544 /* get_link_status is set on LSC (link status) interrupt or
 3545	 * rx sequence error interrupt. get_link_status will stay
 3546	 * true until e1000_check_for_link establishes link
3547 * for copper adapters ONLY
3548 */
3549 switch (hw->phy.media_type) {
3550 case e1000_media_type_copper:
3551 if (hw->mac.get_link_status) {
3552 ret_val = hw->mac.ops.check_for_link(hw);
3553 link_active = !hw->mac.get_link_status;
3554 } else {
3555 link_active = true;
3556 }
3557 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003558 case e1000_media_type_internal_serdes:
3559 ret_val = hw->mac.ops.check_for_link(hw);
3560 link_active = hw->mac.serdes_has_link;
3561 break;
3562 default:
3563 case e1000_media_type_unknown:
3564 break;
3565 }
3566
3567 return link_active;
3568}
3569
Stefan Assmann563988d2011-04-05 04:27:15 +00003570static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3571{
3572 bool ret = false;
3573 u32 ctrl_ext, thstat;
3574
3575 /* check for thermal sensor event on i350, copper only */
3576 if (hw->mac.type == e1000_i350) {
3577 thstat = rd32(E1000_THSTAT);
3578 ctrl_ext = rd32(E1000_CTRL_EXT);
3579
3580 if ((hw->phy.media_type == e1000_media_type_copper) &&
3581 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3582 ret = !!(thstat & event);
3583 }
3584 }
3585
3586 return ret;
3587}
3588
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003589/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003590 * igb_watchdog - Timer Call-back
3591 * @data: pointer to adapter cast into an unsigned long
3592 **/
3593static void igb_watchdog(unsigned long data)
3594{
3595 struct igb_adapter *adapter = (struct igb_adapter *)data;
3596 /* Do the rest outside of interrupt context */
3597 schedule_work(&adapter->watchdog_task);
3598}
3599
3600static void igb_watchdog_task(struct work_struct *work)
3601{
3602 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003603 struct igb_adapter,
3604 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003605 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003606 struct net_device *netdev = adapter->netdev;
Stefan Assmann563988d2011-04-05 04:27:15 +00003607 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003608 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003609
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003610 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003611 if (link) {
3612 if (!netif_carrier_ok(netdev)) {
3613 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003614 hw->mac.ops.get_speed_and_duplex(hw,
3615 &adapter->link_speed,
3616 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003617
3618 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003619 /* Links status message must follow this format */
3620 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003621 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003622 netdev->name,
3623 adapter->link_speed,
3624 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003625 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003626 ((ctrl & E1000_CTRL_TFCE) &&
3627 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3628 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3629 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003630
Stefan Assmann563988d2011-04-05 04:27:15 +00003631 /* check for thermal sensor event */
3632 if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
3633 printk(KERN_INFO "igb: %s The network adapter "
3634 "link speed was downshifted "
3635 "because it overheated.\n",
3636 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003637 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003638
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003639 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003640 adapter->tx_timeout_factor = 1;
3641 switch (adapter->link_speed) {
3642 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003643 adapter->tx_timeout_factor = 14;
3644 break;
3645 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003646 /* maybe add some timeout factor ? */
3647 break;
3648 }
3649
3650 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003651
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003652 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003653 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003654
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003655 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003656 if (!test_bit(__IGB_DOWN, &adapter->state))
3657 mod_timer(&adapter->phy_info_timer,
3658 round_jiffies(jiffies + 2 * HZ));
3659 }
3660 } else {
3661 if (netif_carrier_ok(netdev)) {
3662 adapter->link_speed = 0;
3663 adapter->link_duplex = 0;
Stefan Assmann563988d2011-04-05 04:27:15 +00003664
3665 /* check for thermal sensor event */
3666 if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
3667 printk(KERN_ERR "igb: %s The network adapter "
3668 "was stopped because it "
3669 "overheated.\n",
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003670 netdev->name);
Carolyn Wyborny7ef5ed12011-03-12 08:59:47 +00003671 }
Stefan Assmann563988d2011-04-05 04:27:15 +00003672
Alexander Duyck527d47c2008-11-27 00:21:39 -08003673 /* Links status message must follow this format */
3674 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3675 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003676 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003677
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003678 igb_ping_all_vfs(adapter);
3679
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003680 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003681 if (!test_bit(__IGB_DOWN, &adapter->state))
3682 mod_timer(&adapter->phy_info_timer,
3683 round_jiffies(jiffies + 2 * HZ));
3684 }
3685 }
3686
Eric Dumazet12dcd862010-10-15 17:27:10 +00003687 spin_lock(&adapter->stats64_lock);
3688 igb_update_stats(adapter, &adapter->stats64);
3689 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003690
Alexander Duyckdbabb062009-11-12 18:38:16 +00003691 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003692 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003693 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003694 /* We've lost link, so the controller stops DMA,
3695 * but we've got queued Tx work that's never going
3696 * to get done, so reset controller to flush Tx.
3697 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003698 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3699 adapter->tx_timeout_count++;
3700 schedule_work(&adapter->reset_task);
3701 /* return immediately since reset is imminent */
3702 return;
3703 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003704 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003705
Alexander Duyckdbabb062009-11-12 18:38:16 +00003706 /* Force detection of hung controller every watchdog period */
3707 tx_ring->detect_tx_hung = true;
3708 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003709
Auke Kok9d5c8242008-01-24 02:22:38 -08003710 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003711 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003712 u32 eics = 0;
3713 for (i = 0; i < adapter->num_q_vectors; i++) {
3714 struct igb_q_vector *q_vector = adapter->q_vector[i];
3715 eics |= q_vector->eims_value;
3716 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003717 wr32(E1000_EICS, eics);
3718 } else {
3719 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3720 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003721
Greg Rose13800462010-11-06 02:08:26 +00003722 igb_spoof_check(adapter);
3723
Auke Kok9d5c8242008-01-24 02:22:38 -08003724 /* Reset the timer */
3725 if (!test_bit(__IGB_DOWN, &adapter->state))
3726 mod_timer(&adapter->watchdog_timer,
3727 round_jiffies(jiffies + 2 * HZ));
3728}
3729
3730enum latency_range {
3731 lowest_latency = 0,
3732 low_latency = 1,
3733 bulk_latency = 2,
3734 latency_invalid = 255
3735};
3736
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003737/**
3738 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3739 *
 3740 * Stores a new ITR value based strictly on packet size. This
3741 * algorithm is less sophisticated than that used in igb_update_itr,
3742 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003743 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003744 * were determined based on theoretical maximum wire speed and testing
3745 * data, in order to minimize response time while increasing bulk
3746 * throughput.
3747 * This functionality is controlled by the InterruptThrottleRate module
3748 * parameter (see igb_param.c)
3749 * NOTE: This function is called only when operating in a multiqueue
3750 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003751 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003752 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003753static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003754{
Alexander Duyck047e0032009-10-27 15:49:27 +00003755 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003756 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003757 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003758 struct igb_ring *ring;
3759 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003760
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003761	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 3762	 * ints/sec - an ITR value of 976 (~250 usecs in ~256 ns units).
 3763	 */
3764 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003765 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003766 goto set_itr_val;
3767 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003768
Eric Dumazet12dcd862010-10-15 17:27:10 +00003769 ring = q_vector->rx_ring;
3770 if (ring) {
3771 packets = ACCESS_ONCE(ring->total_packets);
3772
3773 if (packets)
3774 avg_wire_size = ring->total_bytes / packets;
Alexander Duyck047e0032009-10-27 15:49:27 +00003775 }
3776
Eric Dumazet12dcd862010-10-15 17:27:10 +00003777 ring = q_vector->tx_ring;
3778 if (ring) {
3779 packets = ACCESS_ONCE(ring->total_packets);
3780
3781 if (packets)
3782 avg_wire_size = max_t(u32, avg_wire_size,
3783 ring->total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003784 }
3785
3786 /* if avg_wire_size isn't set no work was done */
3787 if (!avg_wire_size)
3788 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003789
3790 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3791 avg_wire_size += 24;
3792
3793 /* Don't starve jumbo frames */
3794 avg_wire_size = min(avg_wire_size, 3000);
3795
3796 /* Give a little boost to mid-size frames */
3797 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3798 new_val = avg_wire_size / 3;
3799 else
3800 new_val = avg_wire_size / 2;
3801
Nick Nunleyabe1c362010-02-17 01:03:19 +00003802 /* when in itr mode 3 do not exceed 20K ints/sec */
3803 if (adapter->rx_itr_setting == 3 && new_val < 196)
3804 new_val = 196;
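	/* Worked example: 600-byte frames give avg_wire_size = 624, so
	 * new_val = 624 / 3 = 208 units of ~256 ns each, i.e. roughly
	 * 53 usecs or about 19K ints/sec.
	 */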
3805
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003806set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003807 if (new_val != q_vector->itr_val) {
3808 q_vector->itr_val = new_val;
3809 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003810 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003811clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003812 if (q_vector->rx_ring) {
3813 q_vector->rx_ring->total_bytes = 0;
3814 q_vector->rx_ring->total_packets = 0;
3815 }
3816 if (q_vector->tx_ring) {
3817 q_vector->tx_ring->total_bytes = 0;
3818 q_vector->tx_ring->total_packets = 0;
3819 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003820}
3821
3822/**
3823 * igb_update_itr - update the dynamic ITR value based on statistics
3824 * Stores a new ITR value based on packets and byte
3825 * counts during the last interrupt. The advantage of per interrupt
3826 * computation is faster updates and more accurate ITR for the current
3827 * traffic pattern. Constants in this function were computed
3828 * based on theoretical maximum wire speed and thresholds were set based
3829 * on testing data as well as attempting to minimize response time
3830 * while increasing bulk throughput.
3831 * this functionality is controlled by the InterruptThrottleRate module
3832 * parameter (see igb_param.c)
3833 * NOTE: These calculations are only valid when operating in a single-
3834 * queue environment.
3835 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003836 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003837 * @packets: the number of packets during this measurement interval
3838 * @bytes: the number of bytes during this measurement interval
3839 **/
3840static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3841 int packets, int bytes)
3842{
3843 unsigned int retval = itr_setting;
3844
3845 if (packets == 0)
3846 goto update_itr_done;
3847
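	/* Worked example: in low_latency, ten 1500-byte frames give
	 * bytes/packets = 1500 > 1200, so the state steps to bulk_latency.
	 */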
3848 switch (itr_setting) {
3849 case lowest_latency:
3850 /* handle TSO and jumbo frames */
3851 if (bytes/packets > 8000)
3852 retval = bulk_latency;
3853 else if ((packets < 5) && (bytes > 512))
3854 retval = low_latency;
3855 break;
3856 case low_latency: /* 50 usec aka 20000 ints/s */
3857 if (bytes > 10000) {
3858 /* this if handles the TSO accounting */
3859 if (bytes/packets > 8000) {
3860 retval = bulk_latency;
3861 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3862 retval = bulk_latency;
 3863			} else if (packets > 35) {
3864 retval = lowest_latency;
3865 }
3866 } else if (bytes/packets > 2000) {
3867 retval = bulk_latency;
3868 } else if (packets <= 2 && bytes < 512) {
3869 retval = lowest_latency;
3870 }
3871 break;
3872 case bulk_latency: /* 250 usec aka 4000 ints/s */
3873 if (bytes > 25000) {
3874 if (packets > 35)
3875 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003876 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003877 retval = low_latency;
3878 }
3879 break;
3880 }
3881
3882update_itr_done:
3883 return retval;
3884}
3885
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003886static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003887{
Alexander Duyck047e0032009-10-27 15:49:27 +00003888 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003889 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003890 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003891
3892 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3893 if (adapter->link_speed != SPEED_1000) {
3894 current_itr = 0;
3895 new_itr = 4000;
3896 goto set_itr_now;
3897 }
3898
3899 adapter->rx_itr = igb_update_itr(adapter,
3900 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003901 q_vector->rx_ring->total_packets,
3902 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003903
Alexander Duyck047e0032009-10-27 15:49:27 +00003904 adapter->tx_itr = igb_update_itr(adapter,
3905 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003906 q_vector->tx_ring->total_packets,
3907 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003908 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003909
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003910 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003911 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003912 current_itr = low_latency;
3913
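	/* itr_val is in ~256 ns units, so e.g. 196 * 256 ns ~= 50 usecs,
	 * which matches the "aka 20,000 ints/sec" figure below.
	 */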
Auke Kok9d5c8242008-01-24 02:22:38 -08003914 switch (current_itr) {
3915 /* counts and packets in update_itr are dependent on these numbers */
3916 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003917 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003918 break;
3919 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003920 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003921 break;
3922 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003923 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003924 break;
3925 default:
3926 break;
3927 }
3928
3929set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003930 q_vector->rx_ring->total_bytes = 0;
3931 q_vector->rx_ring->total_packets = 0;
3932 q_vector->tx_ring->total_bytes = 0;
3933 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003934
Alexander Duyck047e0032009-10-27 15:49:27 +00003935 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003936 /* this attempts to bias the interrupt rate towards Bulk
3937 * by adding intermediate steps when interrupt rate is
3938 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003939 new_itr = new_itr > q_vector->itr_val ?
3940 max((new_itr * q_vector->itr_val) /
3941 (new_itr + (q_vector->itr_val >> 2)),
3942 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003943 new_itr;
3944 /* Don't write the value here; it resets the adapter's
3945 * internal timer, and causes us to delay far longer than
3946 * we should between interrupts. Instead, we write the ITR
3947 * value at the beginning of the next interrupt so the timing
3948 * ends up being correct.
3949 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003950 q_vector->itr_val = new_itr;
3951 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003952 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003953}
3954
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16

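/**
 * igb_tso - set up a TSO context descriptor
 * @tx_ring: ring the packet will be transmitted on
 * @skb: packet to be segmented
 * @tx_flags: IGB_TX_FLAGS_* collected so far for this packet
 * @hdr_len: returns the combined L2/L3/L4 header length to replicate
 *
 * Writes the advanced context descriptor (MACLEN/IPLEN, MSS, L4LEN)
 * that tells the hardware how to segment the payload.  Returns a
 * negative errno if the cloned header cannot be made writable,
 * otherwise non-zero to indicate TSO is in use.
 **/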
static inline int igb_tso(struct igb_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_tx_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx;
	u8 l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->tx_buffer_info[i];
	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

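/**
 * igb_tx_csum - set up a checksum offload context descriptor
 * @tx_ring: ring the packet will be transmitted on
 * @skb: packet needing checksum offload
 * @tx_flags: IGB_TX_FLAGS_* collected so far for this packet
 *
 * Returns true if a context descriptor was queued, which happens when
 * either checksum offload or VLAN insertion is requested.
 **/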
static inline bool igb_tx_csum(struct igb_ring *tx_ring,
			       struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct device *dev = tx_ring->dev;
	struct igb_tx_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IGB_TX_CTXTDESC(tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

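/**
 * igb_tx_map - map skb head and fragments for DMA
 * @tx_ring: ring the packet will be transmitted on
 * @skb: packet whose buffers are to be mapped
 * @first: index of the first descriptor used by this packet
 *
 * Returns the number of buffers mapped, or 0 if a DMA mapping failed,
 * in which case every mapping made so far is unwound.
 **/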
static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
			     unsigned int first)
{
	struct igb_tx_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
	unsigned int hlen = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->tx_buffer_info[i];
	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = hlen;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
		unsigned int len = frag->size;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		buffer_info = &tx_ring->tx_buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buffer_info->dma))
			goto dma_error;
	}

	buffer_info->skb = skb;
	buffer_info->tx_flags = skb_shinfo(skb)->tx_flags;
	/* multiply data chunks by size of headers */
	buffer_info->bytecount = ((gso_segs - 1) * hlen) + skb->len;
	buffer_info->gso_segs = gso_segs;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i = tx_ring->count;
		i--;
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}

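/**
 * igb_tx_queue - write the packet's data descriptors to the ring
 * @tx_ring: ring the packet will be transmitted on
 * @tx_flags: IGB_TX_FLAGS_* collected for this packet
 * @count: number of buffers mapped by igb_tx_map()
 * @paylen: total packet length in bytes
 * @hdr_len: header length, non-zero only for TSO
 *
 * Fills one advanced data descriptor per mapped buffer, marks the last
 * descriptor with RS/EOP and bumps the tail register so the hardware
 * starts fetching.
 **/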
static inline void igb_tx_queue(struct igb_ring *tx_ring,
				u32 tx_flags, int count, u32 paylen,
				u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_tx_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	do {
		buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IGB_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

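/**
 * __igb_maybe_stop_tx - slow path of igb_maybe_stop_tx()
 * @tx_ring: ring that is running out of descriptors
 * @size: number of descriptors needed
 *
 * Stops the subqueue, then re-checks for room after a memory barrier
 * in case the cleanup path freed descriptors in the meantime, waking
 * the subqueue again if it did.
 **/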
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

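/**
 * igb_xmit_frame_ring - transmit one skb on a specific ring
 * @skb: packet to be sent
 * @tx_ring: ring the packet will be placed on
 *
 * Main transmit path: reserves descriptors, flags the packet for
 * timestamping, VLAN, TSO or checksum offload as needed, then maps the
 * buffers and hands the descriptors to hardware.
 **/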
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	int tso = 0, count;
	u32 tx_flags = 0;
	u16 first;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 or less then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}

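/**
 * igb_tx_queue_mapping - map an skb's queue_mapping to a tx ring
 * @adapter: board private structure
 * @skb: packet being transmitted
 *
 * Folds out-of-range queue indices back into the valid range.
 **/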
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

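/**
 * igb_msix_other - handle the shared "other" MSI-X vector
 * @irq: interrupt number
 * @data: pointer to our adapter
 *
 * Services everything that is not ring traffic: device resets, DMA
 * out-of-sync events, VF mailbox messages and link state changes.
 **/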
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

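/**
 * igb_write_itr - write a staged ITR value to a q_vector's EITR register
 * @q_vector: vector whose interrupt throttle rate is being updated
 **/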
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
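/**
 * igb_update_dca - retarget a q_vector's DCA hints at the current CPU
 * @q_vector: vector whose rings should be retagged
 *
 * Rewrites the CPUID fields of DCA_TXCTRL/DCA_RXCTRL so descriptor and
 * header write-backs are steered towards the cache of the CPU currently
 * servicing this vector.
 **/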
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

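/**
 * igb_set_vf_promisc - handle a VF request to change promiscuous modes
 * @adapter: board private structure
 * @msgbuf: mailbox message from the VF
 * @vf: VF index
 *
 * Only multicast promiscuous mode is honored; any flag bits left over
 * after processing cause the request to be rejected with -EINVAL.
 **/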
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN id out of the register *before*
			 * clearing it, otherwise VLAN 0 is always cleared
			 * instead of the one held by this entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

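/**
 * igb_vlvf_set - add or remove a pool from a VLAN filter table entry
 * @adapter: board private structure
 * @vid: VLAN id being added or removed
 * @add: true to add the pool to the filter, false to remove it
 * @vf: pool/VF index
 *
 * Also grows or shrinks the VF's receive packet length limit (RLPML)
 * by 4 bytes per VLAN filter to account for the tag.
 **/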
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

5202static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005203{
5204 struct e1000_hw *hw = &adapter->hw;
5205 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005206 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005207 u32 reg, msgbuf[3];
5208 u8 *addr = (u8 *)(&msgbuf[1]);
5209
5210 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005211 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005212
5213 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005214 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005215
5216 /* enable transmit and receive for vf */
5217 reg = rd32(E1000_VFTE);
5218 wr32(E1000_VFTE, reg | (1 << vf));
5219 reg = rd32(E1000_VFRE);
5220 wr32(E1000_VFRE, reg | (1 << vf));
5221
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005222 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005223
5224 /* reply to reset with ack and vf mac address */
5225 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5226 memcpy(addr, vf_mac, 6);
5227 igb_write_mbx(hw, msgbuf, 3, vf);
5228}
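
/*
 * Illustrative sketch (not part of the driver): the 3-word reset reply
 * written above. Word 0 carries the message type plus the ACK flag; the
 * six MAC bytes are packed starting at the first byte of word 1. The MAC
 * address shown is an arbitrary example, packed as on a little-endian CPU.
 *
 *	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
 *	// addr = (u8 *)&msgbuf[1], addr[0..5] = 00:1b:21:ab:cd:ef
 *	// msgbuf[1] == 0xab211b00, msgbuf[2] == 0x0000efcd
 */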

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke the VF CTS status and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}
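
/*
 * Illustrative sketch (not part of the driver): the layout of a mailbox
 * message word as consumed above, assuming the conventional igb/igbvf
 * mailbox convention where the command sits in the low 16 bits, extra
 * message info sits in the E1000_VT_MSGINFO field above it, and the
 * ACK/NACK/CTS status flags occupy the top bits.
 *
 *	// msgbuf[0] bit layout (assumed):
 *	//   15:0   command, e.g. E1000_VF_SET_VLAN
 *	//   23:16  E1000_VT_MSGINFO (e.g. the "add" flag for VLAN set)
 *	//   high   E1000_VT_MSGTYPE_ACK / _NACK / _CTS status flags
 *	u32 cmd  = msgbuf[0] & 0xFFFF;
 *	u32 info = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
 */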

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware it is necessary to
 * set all of the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}
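
/*
 * Illustrative sketch (not part of the driver): the NAPI contract that
 * igb_poll() follows. A poll handler that still has work left returns the
 * full budget to stay in polling mode; once it finishes under budget it
 * calls napi_complete() and re-enables device interrupts. The helper names
 * below are hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = clean_my_rings(napi, budget);	// hypothetical
 *		if (done == budget)
 *			return budget;		// keep polling
 *		napi_complete(napi);		// leave polling mode
 *		reenable_my_irqs(napi);		// hypothetical
 *		return done;
 *	}
 */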

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
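
/*
 * Illustrative sketch (not part of the driver): why the 82580 value is
 * shifted. Assuming IGB_82580_TSYNC_SHIFT is 24 and the timecounter was
 * registered with that same shift, the cycle counter's read path treats
 * the low 24 bits as fractional, so a raw 1ns-resolution register value
 * must be scaled up before timecounter_cyc2time() scales it back down.
 *
 *	u64 raw = 1000;			// 1000 ns read from RX/TXSTMP
 *	u64 cyc = raw << 24;		// units the timecounter expects
 *	// timecounter_cyc2time() effectively applies (cyc * mult) >> shift,
 *	// recovering ~1000 ns for a nominal mult/shift pair.
 */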

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_tx_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
			    struct igb_tx_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx_work_limit;
	u16 i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);

	for (; budget; budget--) {
		u16 eop = tx_buffer->next_to_watch;
		union e1000_adv_tx_desc *eop_desc;

		eop_desc = IGB_TX_DESC(tx_ring, eop);

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* prevent any other reads prior to eop_desc being verified */
		rmb();

		do {
			tx_desc->wb.status = 0;
			if (likely(tx_desc == eop_desc)) {
				eop_desc = NULL;

				total_bytes += tx_buffer->bytecount;
				total_packets += tx_buffer->gso_segs;
				igb_tx_hwtstamp(q_vector, tx_buffer);
			}

			igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}
		} while (eop_desc);
	}

	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;

	if (tx_ring->detect_tx_hung) {
		struct e1000_hw *hw = &adapter->hw;
		u16 eop = tx_ring->tx_buffer_info[i].next_to_watch;
		union e1000_adv_tx_desc *eop_desc;

		eop_desc = IGB_TX_DESC(tx_ring, eop);

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->tx_buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->tx_buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->tx_buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
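
/*
 * Illustrative sketch (not part of the driver): the cleanup invariant the
 * loop above relies on. Each tx_buffer records, in next_to_watch, the index
 * of the final (EOP) descriptor of its packet; nothing for that packet is
 * reclaimed until the hardware sets DD in that descriptor's write-back
 * status. Example state for a 3-descriptor packet starting at index 0:
 *
 *	// ring:   [ d0 | d1 | d2(EOP) | d3 ... ]
 *	// tx_buffer_info[0].next_to_watch == 2
 *	// once IGB_TX_DESC(tx_ring, 2)->wb.status has E1000_TXD_STAT_DD,
 *	// the inner do/while frees buffers 0, 1 and 2 in one pass.
 */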

static inline void igb_rx_checksum(struct igb_ring *ring,
				   u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, i.e. let the stack check the crc32c itself
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}

static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (staterr & E1000_RXDADV_STAT_TSIP) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IGB_RX_HDR_LEN)
		hlen = IGB_RX_HDR_LEN;
	return hlen;
}
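
/*
 * Illustrative sketch (not part of the driver): extracting the header
 * length bitfield above, assuming the advanced descriptor's HDRBUFLEN
 * field is the 10 bits at positions 5..14 of hdr_info (mask 0x7FE0,
 * shift 5). The sample value is arbitrary.
 *
 *	u16 hdr_info = 0x0C40;			// example write-back value
 *	u16 hlen = (hdr_info & 0x7FE0) >> 5;	// == 98 bytes
 *	// anything beyond hlen was spilled into the half-page buffer
 */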

static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	const int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	u32 staterr;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	u16 i = rx_ring->next_to_clean;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		struct sk_buff *skb = buffer_info->skb;
		union e1000_adv_rx_desc *next_rxd;

		buffer_info->skb = NULL;
		prefetch(skb->data);

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IGB_RX_DESC(rx_ring, i);
		prefetch(next_rxd);

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		if (!skb_is_nonlinear(skb)) {
			__skb_put(skb, igb_get_hlen(rx_desc));
			dma_unmap_single(rx_ring->dev, buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (rx_desc->wb.upper.length) {
			u16 length = le16_to_cpu(rx_desc->wb.upper.length);

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			struct igb_rx_buffer *next_buffer;
			next_buffer = &rx_ring->rx_buffer_info[i];
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
			igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		if (staterr & E1000_RXD_STAT_VP) {
			u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);

			__vlan_hwaccel_put_tag(skb, vid);
		}
		napi_gro_receive(&q_vector->napi, skb);

		budget--;
next_desc:
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return !!budget;
}

static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
				 struct igb_rx_buffer *bi)
{
	struct sk_buff *skb = bi->skb;
	dma_addr_t dma = bi->dma;

	if (dma)
		return true;

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IGB_RX_HDR_LEN);
		bi->skb = skb;
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}

		/* initialize skb for ring */
		skb_record_rx_queue(skb, rx_ring->queue_index);
	}

	dma = dma_map_single(rx_ring->dev, skb->data,
			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	return true;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t page_dma = bi->page_dma;
	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);

	if (page_dma)
		return true;

	if (!page) {
		page = netdev_alloc_page(rx_ring->netdev);
		bi->page = page;
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_failed++;
			return false;
		}
	}

	page_dma = dma_map_page(rx_ring->dev, page,
				page_offset, PAGE_SIZE / 2,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(rx_ring->dev, page_dma)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->page_dma = page_dma;
	bi->page_offset = page_offset;
	return true;
}
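
/*
 * Illustrative sketch (not part of the driver): the half-page flip done
 * by page_offset ^ (PAGE_SIZE / 2) above. With 4 KB pages each remap
 * alternates between the two 2 KB halves, so one half can be handed back
 * to hardware while the stack may still hold a reference to the other.
 *
 *	unsigned int off = 0;
 *	off ^= PAGE_SIZE / 2;		// 0    -> 2048
 *	off ^= PAGE_SIZE / 2;		// 2048 -> 0
 */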

/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of descriptors to refill
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info. */
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;
	}

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
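
/*
 * Illustrative sketch (not part of the driver): the producer ordering the
 * tail update above depends on. Descriptor writes are plain stores, so on
 * weakly-ordered architectures the doorbell could otherwise be observed
 * by the NIC before the descriptors it advertises.
 *
 *	rx_desc->read.pkt_addr = cpu_to_le64(dma);	// 1. publish buffers
 *	wmb();						// 2. order the stores
 *	writel(i, rx_ring->tail);			// 3. ring the doorbell
 */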

/**
 * igb_mii_ioctl - read or write PHY registers on behalf of user space
 * @netdev: network interface device structure
 * @ifr: interface request data containing the MII register and value
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request data holding a struct hwtstamp_config
 * @cmd: ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
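
/*
 * Illustrative sketch (not part of the driver): a minimal user-space
 * caller of SIOCSHWTSTAMP as handled above. "eth0" and the filter choice
 * are examples; fd is any datagram socket used only to carry the ioctl.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	// on success cfg.rx_filter reports what was actually enabled,
 *	// e.g. HWTSTAMP_FILTER_ALL on 82580
 */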

/**
 * igb_ioctl - dispatch device-specific ioctl requests
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
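
/*
 * Illustrative sketch (not part of the driver): how a caller might use the
 * accessor above. The register argument is an offset relative to the PCIe
 * capability, e.g. PCI_EXP_DEVCTL; the mask and shift shown follow the
 * PCIe spec layout of the Device Control register.
 *
 *	u16 devctl;
 *	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl)) {
 *		// max payload size encoding sits in bits 7:5
 *		u16 mps = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
 *	}
 */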

static void igb_vlan_mode(struct net_device *netdev, u32 features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);

	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
6416
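/**
 * __igb_shutdown - common suspend/shutdown path
 * @pdev: PCI device information struct
 * @enable_wake: set on return if wake should stay armed (Wake-on-LAN
 *               filters configured or manageability enabled)
 *
 * Detaches and closes the interface, saves PCI state, programs the
 * Wake-Up Filter Control register from adapter->wol, and powers the
 * PHY link up or down accordingly before disabling the device.
 */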
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = adapter->wol;
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev))
                igb_close(netdev);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
#endif

        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;

        if (wufc) {
                igb_setup_rctl(adapter);
                igb_set_rx_mode(netdev);

                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
                        rctl = rd32(E1000_RCTL);
                        rctl |= E1000_RCTL_MPE;
                        wr32(E1000_RCTL, rctl);
                }

                ctrl = rd32(E1000_CTRL);
                /* advertise wake from D3Cold */
                #define E1000_CTRL_ADVD3WUC 0x00100000
                /* phy power management enable */
                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);

                /* Allow time for pending master requests to run */
                igb_disable_pcie_master(hw);

                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
        } else {
                wr32(E1000_WUC, 0);
                wr32(E1000_WUFC, 0);
        }

        *enable_wake = wufc || adapter->en_mng_pt;
        if (!*enable_wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);

        /* Release control of h/w to f/w. If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        pci_disable_device(pdev);

        return 0;
}

#ifdef CONFIG_PM
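/**
 * igb_suspend - PCI suspend callback
 * @pdev: PCI device information struct
 * @state: target power state (unused; the wake decision comes from
 *         __igb_shutdown)
 *
 * If wake is armed, lets the PCI core pick a wake-capable sleep state;
 * otherwise the device is put into D3hot with wake disabled.
 */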
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int retval;
        bool wake;

        retval = __igb_shutdown(pdev, &wake);
        if (retval)
                return retval;

        if (wake) {
                pci_prepare_to_sleep(pdev);
        } else {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }

        return 0;
}

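/**
 * igb_resume - PCI resume callback
 * @pdev: PCI device information struct
 *
 * Restores PCI state, disarms wake, rebuilds the interrupt scheme,
 * resets the hardware and, if the interface was running, reopens it.
 */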
static int igb_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "igb: Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        wr32(E1000_WUS, ~0);

        if (netif_running(netdev)) {
                err = igb_open(netdev);
                if (err)
                        return err;
        }

        netif_device_attach(netdev);

        return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
        bool wake;

        __igb_shutdown(pdev, &wake);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, wake);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int i;

        if (!adapter->msix_entries) {
                struct igb_q_vector *q_vector = adapter->q_vector[0];
                igb_irq_disable(adapter);
                napi_schedule(&q_vector->napi);
                return;
        }

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                wr32(E1000_EIMC, q_vector->eims_value);
                napi_schedule(&q_vector->napi);
        }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev))
                igb_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        pci_ers_result_t result;
        int err;

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);

                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);

                igb_reset(adapter);
                wr32(E1000_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
                        "failed 0x%x\n", err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (igb_up(adapter)) {
                        dev_err(&pdev->dev, "igb_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);
}

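/**
 * igb_rar_set_qsel - write a MAC address into a Receive Address Register
 * @adapter: board private structure
 * @addr: 6-byte MAC address, in network (big endian) byte order
 * @index: RAR index to program
 * @qsel: pool/queue selector encoded into the RAH POOL bits
 */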
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
        u32 rar_low, rar_high;
        struct e1000_hw *hw = &adapter->hw;

        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

        /* Indicate to hardware the Address is Valid. */
        rar_high |= E1000_RAH_AV;

        if (hw->mac.type == e1000_82575)
                rar_high |= E1000_RAH_POOL_1 * qsel;
        else
                rar_high |= E1000_RAH_POOL_1 << qsel;

        wr32(E1000_RAL(index), rar_low);
        wrfl();
        wr32(E1000_RAH(index), rar_high);
        wrfl();
}

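/**
 * igb_set_vf_mac - program a VF's MAC address into the RAR table
 * @adapter: board private structure
 * @vf: VF index
 * @mac_addr: new MAC address for the VF
 */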
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
        struct e1000_hw *hw = &adapter->hw;
        /* VF MAC addresses start at the end of the receive addresses and
         * move towards the first, so a collision should not be possible */
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);

        memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

        igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

        return 0;
}

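/**
 * igb_ndo_set_vf_mac - administratively set a VF's MAC address
 * @netdev: network interface device structure
 * @vf: VF index
 * @mac: new MAC address
 *
 * Typically reached via "ip link set ... vf N mac". Marks the address
 * as PF-set so the VF cannot override it; the VF driver must be
 * reloaded for the change to take effect.
 */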
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
                return -EINVAL;
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
                 " change effective.\n");
        if (test_bit(__IGB_DOWN, &adapter->state)) {
                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
                         " but the PF device is not up.\n");
                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
                         " attempting to use the VF device.\n");
        }
        return igb_set_vf_mac(adapter, vf, mac);
}

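/**
 * igb_link_mbps - convert an internal SPEED_* value to whole Mbps
 * @internal_link_speed: SPEED_100 or SPEED_1000
 *
 * Returns 0 for any other speed so rate limiting is skipped.
 */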
static int igb_link_mbps(int internal_link_speed)
{
        switch (internal_link_speed) {
        case SPEED_100:
                return 100;
        case SPEED_1000:
                return 1000;
        default:
                return 0;
        }
}

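/**
 * igb_set_vf_rate_limit - program the per-VF TX rate limiter
 * @hw: pointer to the HW structure
 * @vf: VF index; VF X is limited through TX queue X
 * @tx_rate: rate cap in Mbps, or 0 to disable limiting
 * @link_speed: current link speed in Mbps
 *
 * RTTBCNRC holds the ratio link_speed / tx_rate as a fixed-point rate
 * factor: rf_int is the integer part, and rf_dec is the remainder
 * scaled by 2^E1000_RTTBCNRC_RF_INT_SHIFT. Illustrative example: with
 * link_speed = 1000 and tx_rate = 300, rf_int = 3 and rf_dec encodes
 * the remaining ~1/3, pacing the queue to roughly 1000 / 3.33, i.e.
 * about 300 Mbps.
 */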
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
                                  int link_speed)
{
        int rf_dec, rf_int;
        u32 bcnrc_val;

        if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
                rf_int = link_speed / tx_rate;
                rf_dec = (link_speed - (rf_int * tx_rate));
                rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

                bcnrc_val = E1000_RTTBCNRC_RS_ENA;
                bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
                              E1000_RTTBCNRC_RF_INT_MASK);
                bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
        } else {
                bcnrc_val = 0;
        }

        wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
        wr32(E1000_RTTBCNRC, bcnrc_val);
}

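/**
 * igb_check_vf_rate_limit - reapply VF rate limits after a link change
 * @adapter: board private structure
 *
 * Rate factors are computed relative to link speed, so they must be
 * rewritten whenever the link renegotiates; if the speed actually
 * changed, the configured limits are cleared rather than silently
 * rescaled.
 */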
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
        int actual_link_speed, i;
        bool reset_rate = false;

        /* VF TX rate limit was not set or not supported */
        if ((adapter->vf_rate_link_speed == 0) ||
            (adapter->hw.mac.type != e1000_82576))
                return;

        actual_link_speed = igb_link_mbps(adapter->link_speed);
        if (actual_link_speed != adapter->vf_rate_link_speed) {
                reset_rate = true;
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
                         "Link speed has been changed. VF Transmit "
                         "rate is disabled\n");
        }

        for (i = 0; i < adapter->vfs_allocated_count; i++) {
                if (reset_rate)
                        adapter->vf_data[i].tx_rate = 0;

                igb_set_vf_rate_limit(&adapter->hw, i,
                                      adapter->vf_data[i].tx_rate,
                                      actual_link_speed);
        }
}

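/**
 * igb_ndo_set_vf_bw - set a VF's TX bandwidth cap
 * @netdev: network interface device structure
 * @vf: VF index
 * @tx_rate: requested cap in Mbps; must not exceed the current link speed
 *
 * Typically reached via "ip link set ... vf N rate". Only the 82576
 * supports per-VF rate limiting here, and the link must be up so the
 * rate factor can be computed against the actual link speed.
 */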
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int actual_link_speed;

        if (hw->mac.type != e1000_82576)
                return -EOPNOTSUPP;

        actual_link_speed = igb_link_mbps(adapter->link_speed);
        if ((vf >= adapter->vfs_allocated_count) ||
            (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
            (tx_rate < 0) || (tx_rate > actual_link_speed))
                return -EINVAL;

        adapter->vf_rate_link_speed = actual_link_speed;
        adapter->vf_data[vf].tx_rate = (u16)tx_rate;
        igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

        return 0;
}

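/**
 * igb_ndo_get_vf_config - report a VF's current configuration
 * @netdev: network interface device structure
 * @vf: VF index
 * @ivi: buffer filled with the VF's MAC, TX rate, VLAN and QoS settings
 */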
static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        if (vf >= adapter->vfs_allocated_count)
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
        ivi->tx_rate = adapter->vf_data[vf].tx_rate;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        return 0;
}

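/**
 * igb_vmm_control - configure VM-to-VM traffic handling
 * @adapter: board private structure
 *
 * Enables or disables VMDq loopback, replication and anti-spoofing,
 * depending on whether any VFs are allocated. The switch below relies
 * on intentional fall-through: each older MAC also runs the register
 * setup of the cases beneath it.
 */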
static void igb_vmm_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 reg;

        switch (hw->mac.type) {
        case e1000_82575:
        default:
                /* replication is not supported for 82575 */
                return;
        case e1000_82576:
                /* notify HW that the MAC is adding vlan tags */
                reg = rd32(E1000_DTXCTL);
                reg |= E1000_DTXCTL_VLAN_ADDED;
                wr32(E1000_DTXCTL, reg);
                /* fall through */
        case e1000_82580:
                /* enable replication vlan tag stripping */
                reg = rd32(E1000_RPLOLR);
                reg |= E1000_RPLOLR_STRVLAN;
                wr32(E1000_RPLOLR, reg);
                /* fall through */
        case e1000_i350:
                /* none of the above registers are supported by i350 */
                break;
        }

        if (adapter->vfs_allocated_count) {
                igb_vmdq_set_loopback_pf(hw, true);
                igb_vmdq_set_replication_pf(hw, true);
                igb_vmdq_set_anti_spoofing_pf(hw, true,
                                              adapter->vfs_allocated_count);
        } else {
                igb_vmdq_set_loopback_pf(hw, false);
                igb_vmdq_set_replication_pf(hw, false);
        }
}

/* igb_main.c */