/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 0
#define BUILD 6
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
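/* With the MAJ/MIN/BUILD values above, DRV_VERSION expands to "3.0.6-k". */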
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
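/* MODULE_DEVICE_TABLE exports igb_pci_tbl so hotplug tooling (depmod/udev)
 * can autoload this driver when a listed device appears. */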

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	int n = 0;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_buffer *buffer_info;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63     46 45   40 39  38 36 35 32 31 24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP       |SPH| HDR_LEN  | RSV|Packet|  RSS  |
	 *   | Checksum Ident    |   |          |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on the lowest register read.  For the 82580,
	 * the lowest register is SYSTIMR instead of SYSTIML.  However, we
	 * never adjust TIMINCA, so SYSTIMR always reads as all 0s and can be
	 * ignored.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
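
/*
 * As the kerneldoc above notes, igb_read_clock serves as the cyclecounter
 * read callback; the timecounter layered on adapter->cycles turns these raw
 * SYSTIM cycle counts into nanoseconds.
 */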

/**
 * igb_get_hw_dev - return the netdev associated with the hardware
 * used by the hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
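/*
 * Worked example: Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1,
 * Q_IDX_82576(3) = 9, ... - even indices count up from queue 0 and odd
 * indices from queue 8, matching the VF queue pairing described below.
 */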
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
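		/*
		 * No break here: rx rings not claimed above and all tx rings
		 * fall through to the sequential mapping in the default case.
		 */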
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/* 82580 uses the same table-based approach as 82576, but has
		   fewer entries; as a result we carry over for queues greater
		   than 4. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
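
/*
 * Worked example for the 82576 table layout in igb_assign_vector(): rx queue
 * 1 programs the low byte of IVAR0[1], while rx queue 9 (9 & 0x7 = index 1,
 * but queue >= 8) programs the third byte of that same IVAR0[1] entry.
 */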

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
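	/* Example: with 4 RSS queues, queue pairing yields numvecs = 4 + 1 = 5;
	 * separate tx handlers yield numvecs = 4 + 4 + 1 = 9. */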
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
	                                    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

1103static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
1104 int ring_idx, int v_idx)
1105{
Alexander Duyck3025a442010-02-17 01:02:39 +00001106 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
Alexander Duyck047e0032009-10-27 15:49:27 +00001107
Alexander Duyck3025a442010-02-17 01:02:39 +00001108 q_vector->rx_ring = adapter->rx_ring[ring_idx];
Alexander Duyck047e0032009-10-27 15:49:27 +00001109 q_vector->rx_ring->q_vector = q_vector;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001110 q_vector->itr_val = adapter->rx_itr_setting;
1111 if (q_vector->itr_val && q_vector->itr_val <= 3)
1112 q_vector->itr_val = IGB_START_ITR;
Alexander Duyck047e0032009-10-27 15:49:27 +00001113}
1114
1115static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
1116 int ring_idx, int v_idx)
1117{
Alexander Duyck3025a442010-02-17 01:02:39 +00001118 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
Alexander Duyck047e0032009-10-27 15:49:27 +00001119
Alexander Duyck3025a442010-02-17 01:02:39 +00001120 q_vector->tx_ring = adapter->tx_ring[ring_idx];
Alexander Duyck047e0032009-10-27 15:49:27 +00001121 q_vector->tx_ring->q_vector = q_vector;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001122 q_vector->itr_val = adapter->tx_itr_setting;
Alexander Duyck13fde972011-10-05 13:35:24 +00001123 q_vector->tx_work_limit = adapter->tx_work_limit;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00001124 if (q_vector->itr_val && q_vector->itr_val <= 3)
1125 q_vector->itr_val = IGB_START_ITR;
Alexander Duyck047e0032009-10-27 15:49:27 +00001126}
1127
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
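
/*
 * Example: with 4 rx and 4 tx queues, 8 or more q_vectors give every ring
 * its own vector; with only 4 q_vectors, rx ring i and tx ring i share
 * vector i (the paired case in the else branch above).
 */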

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

1319/**
1320 * igb_irq_enable - Enable default interrupt generation settings
1321 * @adapter: board private structure
1322 **/
1323static void igb_irq_enable(struct igb_adapter *adapter)
1324{
1325 struct e1000_hw *hw = &adapter->hw;
1326
1327 if (adapter->msix_entries) {
Alexander Duyck25568a52009-10-27 23:49:59 +00001328 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001329 u32 regval = rd32(E1000_EIAC);
1330 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1331 regval = rd32(E1000_EIAM);
1332 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001333 wr32(E1000_EIMS, adapter->eims_enable_mask);
Alexander Duyck25568a52009-10-27 23:49:59 +00001334 if (adapter->vfs_allocated_count) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001335 wr32(E1000_MBVFIMR, 0xFF);
Alexander Duyck25568a52009-10-27 23:49:59 +00001336 ims |= E1000_IMS_VMMB;
1337 }
Alexander Duyck55cac242009-11-19 12:42:21 +00001338 if (adapter->hw.mac.type == e1000_82580)
1339 ims |= E1000_IMS_DRSTA;
1340
Alexander Duyck25568a52009-10-27 23:49:59 +00001341 wr32(E1000_IMS, ims);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001342 } else {
Alexander Duyck55cac242009-11-19 12:42:21 +00001343 wr32(E1000_IMS, IMS_ENABLE_MASK |
1344 E1000_IMS_DRSTA);
1345 wr32(E1000_IAM, IMS_ENABLE_MASK |
1346 E1000_IMS_DRSTA);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001347 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001348}
1349
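/**
 * igb_update_mng_vlan - keep the VFTA in sync with the manageability VLAN
 * @adapter: board private structure
 *
 * Adds the firmware DHCP cookie VLAN to the filter table and removes the
 * previously programmed one unless the stack still has it active.
 **/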
1350static void igb_update_mng_vlan(struct igb_adapter *adapter)
1351{
Alexander Duyck51466232009-10-27 23:47:35 +00001352 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001353 u16 vid = adapter->hw.mng_cookie.vlan_id;
1354 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001355
Alexander Duyck51466232009-10-27 23:47:35 +00001356 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1357 /* add VID to filter table */
1358 igb_vfta_set(hw, vid, true);
1359 adapter->mng_vlan_id = vid;
1360 } else {
1361 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1362 }
1363
1364 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1365 (vid != old_vid) &&
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001366 !test_bit(old_vid, adapter->active_vlans)) {
Alexander Duyck51466232009-10-27 23:47:35 +00001367 /* remove VID from filter table */
1368 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001369 }
1370}
1371
1372/**
1373 * igb_release_hw_control - release control of the h/w to f/w
1374 * @adapter: address of board private structure
1375 *
1376 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1377 * For ASF and Pass Through versions of f/w this means that the
1378 * driver is no longer loaded.
1379 *
1380 **/
1381static void igb_release_hw_control(struct igb_adapter *adapter)
1382{
1383 struct e1000_hw *hw = &adapter->hw;
1384 u32 ctrl_ext;
1385
1386 /* Let firmware take over control of h/w */
1387 ctrl_ext = rd32(E1000_CTRL_EXT);
1388 wr32(E1000_CTRL_EXT,
1389 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1390}
1391
Auke Kok9d5c8242008-01-24 02:22:38 -08001392/**
1393 * igb_get_hw_control - get control of the h/w from f/w
1394 * @adapter: address of board private structure
1395 *
1396 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1397 * For ASF and Pass Through versions of f/w this means that
1398 * the driver is loaded.
1399 *
1400 **/
1401static void igb_get_hw_control(struct igb_adapter *adapter)
1402{
1403 struct e1000_hw *hw = &adapter->hw;
1404 u32 ctrl_ext;
1405
1406 /* Let firmware know the driver has taken over */
1407 ctrl_ext = rd32(E1000_CTRL_EXT);
1408 wr32(E1000_CTRL_EXT,
1409 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1410}
1411
Auke Kok9d5c8242008-01-24 02:22:38 -08001412/**
1413 * igb_configure - configure the hardware for RX and TX
1414 * @adapter: private board structure
1415 **/
1416static void igb_configure(struct igb_adapter *adapter)
1417{
1418 struct net_device *netdev = adapter->netdev;
1419 int i;
1420
1421 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001422 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001423
1424 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001425
Alexander Duyck85b430b2009-10-27 15:50:29 +00001426 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001427 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001428 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001429
1430 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001431 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001432
1433 igb_rx_fifo_flush_82575(&adapter->hw);
1434
Alexander Duyckc493ea42009-03-20 00:16:50 +00001435 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001436 * at least 1 descriptor unused to make sure
1437 * next_to_use != next_to_clean */
1438 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001439 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckcd392f52011-08-26 07:43:59 +00001440 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001441 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001442}
1443
Nick Nunley88a268c2010-02-17 01:01:59 +00001444/**
1445 * igb_power_up_link - Power up the phy/serdes link
1446 * @adapter: address of board private structure
1447 **/
1448void igb_power_up_link(struct igb_adapter *adapter)
1449{
1450 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1451 igb_power_up_phy_copper(&adapter->hw);
1452 else
1453 igb_power_up_serdes_link_82575(&adapter->hw);
1454}
1455
1456/**
1457 * igb_power_down_link - Power down the phy/serdes link
1458 * @adapter: address of board private structure
1459 */
1460static void igb_power_down_link(struct igb_adapter *adapter)
1461{
1462 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1463 igb_power_down_phy_copper_82575(&adapter->hw);
1464 else
1465 igb_shutdown_serdes_link_82575(&adapter->hw);
1466}
Auke Kok9d5c8242008-01-24 02:22:38 -08001467
1468/**
1469 * igb_up - Open the interface and prepare it to handle traffic
1470 * @adapter: board private structure
1471 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001472int igb_up(struct igb_adapter *adapter)
1473{
1474 struct e1000_hw *hw = &adapter->hw;
1475 int i;
1476
1477 /* hardware has been reset, we need to reload some things */
1478 igb_configure(adapter);
1479
1480 clear_bit(__IGB_DOWN, &adapter->state);
1481
Alexander Duyck047e0032009-10-27 15:49:27 +00001482 for (i = 0; i < adapter->num_q_vectors; i++) {
1483 struct igb_q_vector *q_vector = adapter->q_vector[i];
1484 napi_enable(&q_vector->napi);
1485 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001486 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001487 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001488 else
1489 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001490
1491 /* Clear any pending interrupts. */
1492 rd32(E1000_ICR);
1493 igb_irq_enable(adapter);
1494
Alexander Duyckd4960302009-10-27 15:53:45 +00001495 /* notify VFs that reset has been completed */
1496 if (adapter->vfs_allocated_count) {
1497 u32 reg_data = rd32(E1000_CTRL_EXT);
1498 reg_data |= E1000_CTRL_EXT_PFRSTD;
1499 wr32(E1000_CTRL_EXT, reg_data);
1500 }
1501
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001502 netif_tx_start_all_queues(adapter->netdev);
1503
Alexander Duyck25568a52009-10-27 23:49:59 +00001504 /* start the watchdog. */
1505 hw->mac.get_link_status = 1;
1506 schedule_work(&adapter->watchdog_task);
1507
Auke Kok9d5c8242008-01-24 02:22:38 -08001508 return 0;
1509}
1510
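/**
 * igb_down - quiesce the interface and reset the hardware
 * @adapter: board private structure
 *
 * Disables receives and transmits, masks interrupts, stops the watchdog,
 * records a final set of statistics and cleans the rings.
 **/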
1511void igb_down(struct igb_adapter *adapter)
1512{
Auke Kok9d5c8242008-01-24 02:22:38 -08001513 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001514 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001515 u32 tctl, rctl;
1516 int i;
1517
1518 /* signal that we're down so the interrupt handler does not
1519 * reschedule our watchdog timer */
1520 set_bit(__IGB_DOWN, &adapter->state);
1521
1522 /* disable receives in the hardware */
1523 rctl = rd32(E1000_RCTL);
1524 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1525 /* flush and sleep below */
1526
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001527 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001528
1529 /* disable transmits in the hardware */
1530 tctl = rd32(E1000_TCTL);
1531 tctl &= ~E1000_TCTL_EN;
1532 wr32(E1000_TCTL, tctl);
1533 /* flush both disables and wait for them to finish */
1534 wrfl();
1535 msleep(10);
1536
Alexander Duyck047e0032009-10-27 15:49:27 +00001537 for (i = 0; i < adapter->num_q_vectors; i++) {
1538 struct igb_q_vector *q_vector = adapter->q_vector[i];
1539 napi_disable(&q_vector->napi);
1540 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001541
Auke Kok9d5c8242008-01-24 02:22:38 -08001542 igb_irq_disable(adapter);
1543
1544 del_timer_sync(&adapter->watchdog_timer);
1545 del_timer_sync(&adapter->phy_info_timer);
1546
Auke Kok9d5c8242008-01-24 02:22:38 -08001547 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001548
 1549	/* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001550 spin_lock(&adapter->stats64_lock);
1551 igb_update_stats(adapter, &adapter->stats64);
1552 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001553
Auke Kok9d5c8242008-01-24 02:22:38 -08001554 adapter->link_speed = 0;
1555 adapter->link_duplex = 0;
1556
Jeff Kirsher30236822008-06-24 17:01:15 -07001557 if (!pci_channel_offline(adapter->pdev))
1558 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001559 igb_clean_all_tx_rings(adapter);
1560 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001561#ifdef CONFIG_IGB_DCA
1562
 1563	/* since we reset the hardware, DCA settings were cleared */
1564 igb_setup_dca(adapter);
1565#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001566}
1567
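/**
 * igb_reinit_locked - perform a full down/up cycle under the reset bit
 * @adapter: board private structure
 *
 * Serializes against concurrent resets via __IGB_RESETTING so only one
 * reinitialization runs at a time.
 **/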
1568void igb_reinit_locked(struct igb_adapter *adapter)
1569{
1570 WARN_ON(in_interrupt());
1571 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1572 msleep(1);
1573 igb_down(adapter);
1574 igb_up(adapter);
1575 clear_bit(__IGB_RESETTING, &adapter->state);
1576}
1577
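/**
 * igb_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer, recomputes the flow control watermarks,
 * resets the MAC and restores DMA coalescing and manageability settings.
 **/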
1578void igb_reset(struct igb_adapter *adapter)
1579{
Alexander Duyck090b1792009-10-27 23:51:55 +00001580 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001581 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001582 struct e1000_mac_info *mac = &hw->mac;
1583 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001584 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1585 u16 hwm;
1586
 1587	/* Repartition the PBA for jumbo frames (MTU greater than 9k).
 1588	 * CTRL.RST is required for the change to take effect.
 1589	 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001590 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001591 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001592 case e1000_82580:
1593 pba = rd32(E1000_RXPBS);
1594 pba = igb_rxpbs_adjust_82580(pba);
1595 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001596 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001597 pba = rd32(E1000_RXPBS);
1598 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001599 break;
1600 case e1000_82575:
1601 default:
1602 pba = E1000_PBA_34K;
1603 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001604 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001605
Alexander Duyck2d064c02008-07-08 15:10:12 -07001606 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1607 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001608 /* adjust PBA for jumbo frames */
1609 wr32(E1000_PBA, pba);
1610
1611 /* To maintain wire speed transmits, the Tx FIFO should be
1612 * large enough to accommodate two full transmit packets,
1613 * rounded up to the next 1KB and expressed in KB. Likewise,
1614 * the Rx FIFO should be large enough to accommodate at least
1615 * one full receive packet and is similarly rounded up and
1616 * expressed in KB. */
1617 pba = rd32(E1000_PBA);
1618 /* upper 16 bits has Tx packet buffer allocation size in KB */
1619 tx_space = pba >> 16;
1620 /* lower 16 bits has Rx packet buffer allocation size in KB */
1621 pba &= 0xffff;
 1622	/* the Tx FIFO also stores 16 bytes of information about the Tx packet,
 1623	 * but don't include the Ethernet FCS because hardware appends it */
1624 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001625 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001626 ETH_FCS_LEN) * 2;
1627 min_tx_space = ALIGN(min_tx_space, 1024);
1628 min_tx_space >>= 10;
1629 /* software strips receive CRC, so leave room for it */
1630 min_rx_space = adapter->max_frame_size;
1631 min_rx_space = ALIGN(min_rx_space, 1024);
1632 min_rx_space >>= 10;
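	/* Illustrative numbers, assuming a 1522-byte max frame: the Tx
	 * minimum is ALIGN((1522 + 16 - 4) * 2, 1024) >> 10 = 4KB and the
	 * Rx minimum is ALIGN(1522, 1024) >> 10 = 2KB. */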
1633
1634 /* If current Tx allocation is less than the min Tx FIFO size,
1635 * and the min Tx FIFO size is less than the current Rx FIFO
1636 * allocation, take space away from current Rx allocation */
1637 if (tx_space < min_tx_space &&
1638 ((min_tx_space - tx_space) < pba)) {
1639 pba = pba - (min_tx_space - tx_space);
1640
1641 /* if short on rx space, rx wins and must trump tx
1642 * adjustment */
1643 if (pba < min_rx_space)
1644 pba = min_rx_space;
1645 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001646 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001647 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001648
1649 /* flow control settings */
1650 /* The high water mark must be low enough to fit one full frame
1651 * (or the size used for early receive) above it in the Rx FIFO.
1652 * Set it to the lower of:
1653 * - 90% of the Rx FIFO size, or
 1654	 * - the full Rx FIFO size minus two full frames */
1655 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001656 ((pba << 10) - 2 * adapter->max_frame_size));
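	/* Illustrative numbers, assuming a 34KB PBA and 1522-byte max frame:
	 * min(34816 * 9 / 10, 34816 - 2 * 1522) = min(31334, 31772), so the
	 * 90% limit wins here before the 16-byte rounding below. */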
Auke Kok9d5c8242008-01-24 02:22:38 -08001657
Alexander Duyckd405ea32009-12-23 13:21:27 +00001658 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1659 fc->low_water = fc->high_water - 16;
Auke Kok9d5c8242008-01-24 02:22:38 -08001660 fc->pause_time = 0xFFFF;
1661 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001662 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001663
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001664 /* disable receive for all VFs and wait one second */
1665 if (adapter->vfs_allocated_count) {
1666 int i;
1667 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001668 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001669
1670 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001671 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001672
1673 /* disable transmits and receives */
1674 wr32(E1000_VFRE, 0);
1675 wr32(E1000_VFTE, 0);
1676 }
1677
Auke Kok9d5c8242008-01-24 02:22:38 -08001678 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001679 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001680 wr32(E1000_WUC, 0);
1681
Alexander Duyck330a6d62009-10-27 23:51:35 +00001682 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001683 dev_err(&pdev->dev, "Hardware Error\n");
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08001684 if (hw->mac.type > e1000_82580) {
1685 if (adapter->flags & IGB_FLAG_DMAC) {
1686 u32 reg;
Auke Kok9d5c8242008-01-24 02:22:38 -08001687
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08001688 /*
 1689			 * The DMA Coalescing high water mark needs to be
 1690			 * higher than the Rx threshold. The Rx threshold is
 1691			 * currently pba - 6, so use a high water mark of
 1692			 * pba - 4. */
1693 hwm = (pba - 4) << 10;
1694
1695 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1696 & E1000_DMACR_DMACTHR_MASK);
1697
 1698			/* transition to L0s or L1 if available */
1699 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1700
 1701			/* watchdog timer ~= 1000 usec, expressed in 32 usec intervals */
1702 reg |= (1000 >> 5);
1703 wr32(E1000_DMACR, reg);
1704
 1705			/* no lower threshold to disable coalescing (smart FIFO);
 1706			 * UTRESH = 0 */
1707 wr32(E1000_DMCRTRH, 0);
1708
 1709			/* write the DMA coalescing high water mark computed above */
1710 wr32(E1000_FCRTC, hwm);
1711
1712 /*
 1713			 * This sets the time to wait before requesting a transition
 1714			 * to a low power state to the number of usecs needed to
 1715			 * receive a single 512-byte frame at gigabit line rate.
1716 */
1717 reg = rd32(E1000_DMCTLX);
1718 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1719
1720 /* Delay 255 usec before entering Lx state. */
1721 reg |= 0xFF;
1722 wr32(E1000_DMCTLX, reg);
1723
1724 /* free space in Tx packet buffer to wake from DMAC */
1725 wr32(E1000_DMCTXTH,
1726 (IGB_MIN_TXPBSIZE -
1727 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1728 >> 6);
1729
1730 /* make low power state decision controlled by DMAC */
1731 reg = rd32(E1000_PCIEMISC);
1732 reg |= E1000_PCIEMISC_LX_DECISION;
1733 wr32(E1000_PCIEMISC, reg);
1734 } /* end if IGB_FLAG_DMAC set */
1735 }
Alexander Duyck55cac242009-11-19 12:42:21 +00001736 if (hw->mac.type == e1000_82580) {
1737 u32 reg = rd32(E1000_PCIEMISC);
1738 wr32(E1000_PCIEMISC,
1739 reg & ~E1000_PCIEMISC_LX_DECISION);
1740 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001741 if (!netif_running(adapter->netdev))
1742 igb_power_down_link(adapter);
1743
Auke Kok9d5c8242008-01-24 02:22:38 -08001744 igb_update_mng_vlan(adapter);
1745
1746 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1747 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1748
Alexander Duyck330a6d62009-10-27 23:51:35 +00001749 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001750}
1751
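/**
 * igb_fix_features - sanitize a requested feature set
 * @netdev: network interface device structure
 * @features: feature flags requested by the stack
 *
 * Returns the feature flags the hardware can actually honor.
 **/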
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001752static u32 igb_fix_features(struct net_device *netdev, u32 features)
1753{
1754 /*
1755 * Since there is no support for separate rx/tx vlan accel
1756 * enable/disable make sure tx flag is always in same state as rx.
1757 */
1758 if (features & NETIF_F_HW_VLAN_RX)
1759 features |= NETIF_F_HW_VLAN_TX;
1760 else
1761 features &= ~NETIF_F_HW_VLAN_TX;
1762
1763 return features;
1764}
1765
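/**
 * igb_set_features - apply a sanitized feature set to the hardware
 * @netdev: network interface device structure
 * @features: feature flags to program
 *
 * Propagates the RXCSUM flag to every Rx ring and reprograms the VLAN
 * acceleration mode when it changed.
 **/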
Michał Mirosławac52caa2011-06-08 08:38:01 +00001766static int igb_set_features(struct net_device *netdev, u32 features)
1767{
1768 struct igb_adapter *adapter = netdev_priv(netdev);
1769 int i;
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001770 u32 changed = netdev->features ^ features;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001771
1772 for (i = 0; i < adapter->num_rx_queues; i++) {
1773 if (features & NETIF_F_RXCSUM)
1774 adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
1775 else
1776 adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
1777 }
1778
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001779 if (changed & NETIF_F_HW_VLAN_RX)
1780 igb_vlan_mode(netdev, features);
1781
Michał Mirosławac52caa2011-06-08 08:38:01 +00001782 return 0;
1783}
1784
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001785static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001786 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001787 .ndo_stop = igb_close,
Alexander Duyckcd392f52011-08-26 07:43:59 +00001788 .ndo_start_xmit = igb_xmit_frame,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001789 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001790 .ndo_set_rx_mode = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001791 .ndo_set_mac_address = igb_set_mac,
1792 .ndo_change_mtu = igb_change_mtu,
1793 .ndo_do_ioctl = igb_ioctl,
1794 .ndo_tx_timeout = igb_tx_timeout,
1795 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001796 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1797 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001798 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1799 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1800 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1801 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001802#ifdef CONFIG_NET_POLL_CONTROLLER
1803 .ndo_poll_controller = igb_netpoll,
1804#endif
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001805 .ndo_fix_features = igb_fix_features,
1806 .ndo_set_features = igb_set_features,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001807};
1808
Taku Izumi42bfd33a2008-06-20 12:10:30 +09001809/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001810 * igb_probe - Device Initialization Routine
1811 * @pdev: PCI device information struct
1812 * @ent: entry in igb_pci_tbl
1813 *
1814 * Returns 0 on success, negative on failure
1815 *
1816 * igb_probe initializes an adapter identified by a pci_dev structure.
1817 * The OS initialization, configuring of the adapter private structure,
1818 * and a hardware reset occur.
1819 **/
1820static int __devinit igb_probe(struct pci_dev *pdev,
1821 const struct pci_device_id *ent)
1822{
1823 struct net_device *netdev;
1824 struct igb_adapter *adapter;
1825 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001826 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001827 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001828 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001829 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1830 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001831 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001832 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001833 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001834
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001835 /* Catch broken hardware that put the wrong VF device ID in
1836 * the PCIe SR-IOV capability.
1837 */
1838 if (pdev->is_virtfn) {
1839 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1840 pci_name(pdev), pdev->vendor, pdev->device);
1841 return -EINVAL;
1842 }
1843
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001844 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001845 if (err)
1846 return err;
1847
1848 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001849 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001850 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001851 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001852 if (!err)
1853 pci_using_dac = 1;
1854 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001855 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001856 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001857 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001858 if (err) {
1859 dev_err(&pdev->dev, "No usable DMA "
1860 "configuration, aborting\n");
1861 goto err_dma;
1862 }
1863 }
1864 }
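	/* prefer 64-bit DMA with a 32-bit fallback; pci_using_dac later
	 * gates the NETIF_F_HIGHDMA features */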
1865
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001866 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1867 IORESOURCE_MEM),
1868 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001869 if (err)
1870 goto err_pci_reg;
1871
Frans Pop19d5afd2009-10-02 10:04:12 -07001872 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001873
Auke Kok9d5c8242008-01-24 02:22:38 -08001874 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001875 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001876
1877 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001878 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
Alexander Duyck1cc3bd82011-08-26 07:44:10 +00001879 IGB_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001880 if (!netdev)
1881 goto err_alloc_etherdev;
1882
1883 SET_NETDEV_DEV(netdev, &pdev->dev);
1884
1885 pci_set_drvdata(pdev, netdev);
1886 adapter = netdev_priv(netdev);
1887 adapter->netdev = netdev;
1888 adapter->pdev = pdev;
1889 hw = &adapter->hw;
1890 hw->back = adapter;
1891 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1892
1893 mmio_start = pci_resource_start(pdev, 0);
1894 mmio_len = pci_resource_len(pdev, 0);
1895
1896 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001897 hw->hw_addr = ioremap(mmio_start, mmio_len);
1898 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001899 goto err_ioremap;
1900
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001901 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001902 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001903 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001904
1905 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1906
1907 netdev->mem_start = mmio_start;
1908 netdev->mem_end = mmio_start + mmio_len;
1909
Auke Kok9d5c8242008-01-24 02:22:38 -08001910 /* PCI config space info */
1911 hw->vendor_id = pdev->vendor;
1912 hw->device_id = pdev->device;
1913 hw->revision_id = pdev->revision;
1914 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1915 hw->subsystem_device_id = pdev->subsystem_device;
1916
Auke Kok9d5c8242008-01-24 02:22:38 -08001917 /* Copy the default MAC, PHY and NVM function pointers */
1918 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1919 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1920 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1921 /* Initialize skew-specific constants */
1922 err = ei->get_invariants(hw);
1923 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001924 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001925
Alexander Duyck450c87c2009-02-06 23:22:11 +00001926 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001927 err = igb_sw_init(adapter);
1928 if (err)
1929 goto err_sw_init;
1930
1931 igb_get_bus_info_pcie(hw);
1932
1933 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001934
1935 /* Copper options */
1936 if (hw->phy.media_type == e1000_media_type_copper) {
1937 hw->phy.mdix = AUTO_ALL_MODES;
1938 hw->phy.disable_polarity_correction = false;
1939 hw->phy.ms_type = e1000_ms_hw_default;
1940 }
1941
1942 if (igb_check_reset_block(hw))
1943 dev_info(&pdev->dev,
1944 "PHY reset is blocked due to SOL/IDER session.\n");
1945
Michał Mirosławac52caa2011-06-08 08:38:01 +00001946 netdev->hw_features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001947 NETIF_F_IP_CSUM |
Michał Mirosławac52caa2011-06-08 08:38:01 +00001948 NETIF_F_IPV6_CSUM |
1949 NETIF_F_TSO |
1950 NETIF_F_TSO6 |
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00001951 NETIF_F_RXCSUM |
1952 NETIF_F_HW_VLAN_RX;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001953
1954 netdev->features = netdev->hw_features |
Auke Kok9d5c8242008-01-24 02:22:38 -08001955 NETIF_F_HW_VLAN_TX |
Auke Kok9d5c8242008-01-24 02:22:38 -08001956 NETIF_F_HW_VLAN_FILTER;
1957
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001958 netdev->vlan_features |= NETIF_F_TSO;
1959 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001960 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001961 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001962 netdev->vlan_features |= NETIF_F_SG;
1963
Yi Zou7b872a52010-09-22 17:57:58 +00001964 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001965 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001966 netdev->vlan_features |= NETIF_F_HIGHDMA;
1967 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001968
Michał Mirosławac52caa2011-06-08 08:38:01 +00001969 if (hw->mac.type >= e1000_82576) {
1970 netdev->hw_features |= NETIF_F_SCTP_CSUM;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001971 netdev->features |= NETIF_F_SCTP_CSUM;
Michał Mirosławac52caa2011-06-08 08:38:01 +00001972 }
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001973
Jiri Pirko01789342011-08-16 06:29:00 +00001974 netdev->priv_flags |= IFF_UNICAST_FLT;
1975
Alexander Duyck330a6d62009-10-27 23:51:35 +00001976 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001977
1978 /* before reading the NVM, reset the controller to put the device in a
1979 * known good starting state */
1980 hw->mac.ops.reset_hw(hw);
1981
1982 /* make sure the NVM is good */
Carolyn Wyborny4322e562011-03-11 20:43:18 -08001983 if (hw->nvm.ops.validate(hw) < 0) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001984 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1985 err = -EIO;
1986 goto err_eeprom;
1987 }
1988
1989 /* copy the MAC address out of the NVM */
1990 if (hw->mac.ops.read_mac_addr(hw))
1991 dev_err(&pdev->dev, "NVM Read Error\n");
1992
1993 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1994 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1995
1996 if (!is_valid_ether_addr(netdev->perm_addr)) {
1997 dev_err(&pdev->dev, "Invalid MAC Address\n");
1998 err = -EIO;
1999 goto err_eeprom;
2000 }
2001
Joe Perchesc061b182010-08-23 18:20:03 +00002002 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00002003 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00002004 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00002005 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002006
2007 INIT_WORK(&adapter->reset_task, igb_reset_task);
2008 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2009
Alexander Duyck450c87c2009-02-06 23:22:11 +00002010 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08002011 adapter->fc_autoneg = true;
2012 hw->mac.autoneg = true;
2013 hw->phy.autoneg_advertised = 0x2f;
2014
Alexander Duyck0cce1192009-07-23 18:10:24 +00002015 hw->fc.requested_mode = e1000_fc_default;
2016 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08002017
Auke Kok9d5c8242008-01-24 02:22:38 -08002018 igb_validate_mdi_setting(hw);
2019
Auke Kok9d5c8242008-01-24 02:22:38 -08002020	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
2021 * enable the ACPI Magic Packet filter
2022 */
2023
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002024 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00002025 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Carolyn Wyborny6d337dc2011-07-07 00:24:56 +00002026 else if (hw->mac.type >= e1000_82580)
Alexander Duyck55cac242009-11-19 12:42:21 +00002027 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2028 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2029 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00002030 else if (hw->bus.func == 1)
2031 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08002032
2033 if (eeprom_data & eeprom_apme_mask)
2034 adapter->eeprom_wol |= E1000_WUFC_MAG;
2035
2036 /* now that we have the eeprom settings, apply the special cases where
2037 * the eeprom may be wrong or the board simply won't support wake on
2038 * lan on a particular port */
2039 switch (pdev->device) {
2040 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2041 adapter->eeprom_wol = 0;
2042 break;
2043 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002044 case E1000_DEV_ID_82576_FIBER:
2045 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002046 /* Wake events only supported on port A for dual fiber
2047 * regardless of eeprom setting */
2048 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2049 adapter->eeprom_wol = 0;
2050 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002051 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002052 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002053 /* if quad port adapter, disable WoL on all but port A */
2054 if (global_quad_port_a != 0)
2055 adapter->eeprom_wol = 0;
2056 else
2057 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2058 /* Reset for multiple quad port adapters */
2059 if (++global_quad_port_a == 4)
2060 global_quad_port_a = 0;
2061 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002062 }
2063
2064 /* initialize the wol settings based on the eeprom settings */
2065 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002066 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002067
2068 /* reset the hardware with the new settings */
2069 igb_reset(adapter);
2070
2071 /* let the f/w know that the h/w is now under the control of the
2072 * driver. */
2073 igb_get_hw_control(adapter);
2074
Auke Kok9d5c8242008-01-24 02:22:38 -08002075 strcpy(netdev->name, "eth%d");
2076 err = register_netdev(netdev);
2077 if (err)
2078 goto err_register;
2079
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00002080 igb_vlan_mode(netdev, netdev->features);
2081
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002082 /* carrier off reporting is important to ethtool even BEFORE open */
2083 netif_carrier_off(netdev);
2084
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002085#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002086 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002087 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002088 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002089 igb_setup_dca(adapter);
2090 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002091
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002092#endif
Anders Berggren673b8b72011-02-04 07:32:32 +00002093 /* do hw tstamp init after resetting */
2094 igb_init_hw_timer(adapter);
2095
Auke Kok9d5c8242008-01-24 02:22:38 -08002096 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2097 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002098 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002099 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002100 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002101 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002102 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002103 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2104 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2105 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2106 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002107 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002108
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002109 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2110 if (ret_val)
2111 strcpy(part_str, "Unknown");
2112 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002113 dev_info(&pdev->dev,
2114 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2115 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002116 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002117 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002118 switch (hw->mac.type) {
2119 case e1000_i350:
2120 igb_set_eee_i350(hw);
2121 break;
2122 default:
2123 break;
2124 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002125 return 0;
2126
2127err_register:
2128 igb_release_hw_control(adapter);
2129err_eeprom:
2130 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002131 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002132
2133 if (hw->flash_address)
2134 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002135err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002136 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002137 iounmap(hw->hw_addr);
2138err_ioremap:
2139 free_netdev(netdev);
2140err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002141 pci_release_selected_regions(pdev,
2142 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002143err_pci_reg:
2144err_dma:
2145 pci_disable_device(pdev);
2146 return err;
2147}
2148
2149/**
2150 * igb_remove - Device Removal Routine
2151 * @pdev: PCI device information struct
2152 *
2153 * igb_remove is called by the PCI subsystem to alert the driver
 2154 * that it should release a PCI device. This could be caused by a
2155 * Hot-Plug event, or because the driver is going to be removed from
2156 * memory.
2157 **/
2158static void __devexit igb_remove(struct pci_dev *pdev)
2159{
2160 struct net_device *netdev = pci_get_drvdata(pdev);
2161 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002162 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002163
Tejun Heo760141a2010-12-12 16:45:14 +01002164 /*
2165 * The watchdog timer may be rescheduled, so explicitly
2166 * disable watchdog from being rescheduled.
2167 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002168 set_bit(__IGB_DOWN, &adapter->state);
2169 del_timer_sync(&adapter->watchdog_timer);
2170 del_timer_sync(&adapter->phy_info_timer);
2171
Tejun Heo760141a2010-12-12 16:45:14 +01002172 cancel_work_sync(&adapter->reset_task);
2173 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002174
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002175#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002176 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002177 dev_info(&pdev->dev, "DCA disabled\n");
2178 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002179 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002180 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002181 }
2182#endif
2183
Auke Kok9d5c8242008-01-24 02:22:38 -08002184 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2185 * would have already happened in close and is redundant. */
2186 igb_release_hw_control(adapter);
2187
2188 unregister_netdev(netdev);
2189
Alexander Duyck047e0032009-10-27 15:49:27 +00002190 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002191
Alexander Duyck37680112009-02-19 20:40:30 -08002192#ifdef CONFIG_PCI_IOV
2193 /* reclaim resources allocated to VFs */
2194 if (adapter->vf_data) {
2195 /* disable iov and allow time for transactions to clear */
2196 pci_disable_sriov(pdev);
2197 msleep(500);
2198
2199 kfree(adapter->vf_data);
2200 adapter->vf_data = NULL;
2201 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
Jesse Brandeburg945a5152011-07-20 00:56:21 +00002202 wrfl();
Alexander Duyck37680112009-02-19 20:40:30 -08002203 msleep(100);
2204 dev_info(&pdev->dev, "IOV Disabled\n");
2205 }
2206#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002207
Alexander Duyck28b07592009-02-06 23:20:31 +00002208 iounmap(hw->hw_addr);
2209 if (hw->flash_address)
2210 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002211 pci_release_selected_regions(pdev,
2212 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002213
2214 free_netdev(netdev);
2215
Frans Pop19d5afd2009-10-02 10:04:12 -07002216 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002217
Auke Kok9d5c8242008-01-24 02:22:38 -08002218 pci_disable_device(pdev);
2219}
2220
2221/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002222 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2223 * @adapter: board private structure to initialize
2224 *
2225 * This function initializes the vf specific data storage and then attempts to
2226 * allocate the VFs. The reason for ordering it this way is because it is much
2227 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2228 * the memory for the VFs.
2229 **/
2230static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2231{
2232#ifdef CONFIG_PCI_IOV
2233 struct pci_dev *pdev = adapter->pdev;
2234
Alexander Duycka6b623e2009-10-27 23:47:53 +00002235 if (adapter->vfs_allocated_count) {
2236 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2237 sizeof(struct vf_data_storage),
2238 GFP_KERNEL);
2239 /* if allocation failed then we do not support SR-IOV */
2240 if (!adapter->vf_data) {
2241 adapter->vfs_allocated_count = 0;
2242 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2243 "Data Storage\n");
2244 }
2245 }
2246
2247 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
2248 kfree(adapter->vf_data);
2249 adapter->vf_data = NULL;
2250#endif /* CONFIG_PCI_IOV */
2251 adapter->vfs_allocated_count = 0;
2252#ifdef CONFIG_PCI_IOV
2253 } else {
2254 unsigned char mac_addr[ETH_ALEN];
2255 int i;
2256 dev_info(&pdev->dev, "%d vfs allocated\n",
2257 adapter->vfs_allocated_count);
2258 for (i = 0; i < adapter->vfs_allocated_count; i++) {
2259 random_ether_addr(mac_addr);
2260 igb_set_vf_mac(adapter, i, mac_addr);
2261 }
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002262 /* DMA Coalescing is not supported in IOV mode. */
2263 if (adapter->flags & IGB_FLAG_DMAC)
2264 adapter->flags &= ~IGB_FLAG_DMAC;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002265 }
2266#endif /* CONFIG_PCI_IOV */
2267}
2268
Alexander Duyck115f4592009-11-12 18:37:00 +00002269
2270/**
2271 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2272 * @adapter: board private structure to initialize
2273 *
2274 * igb_init_hw_timer initializes the function pointer and values for the hw
2275 * timer found in hardware.
2276 **/
2277static void igb_init_hw_timer(struct igb_adapter *adapter)
2278{
2279 struct e1000_hw *hw = &adapter->hw;
2280
2281 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002282 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002283 case e1000_82580:
2284 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2285 adapter->cycles.read = igb_read_clock;
2286 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2287 adapter->cycles.mult = 1;
2288 /*
 2289	 * The 82580 timesync advances the system timer by 8 ns every 8 ns
2290 * and the value cannot be shifted. Instead we need to shift
2291 * the registers to generate a 64bit timer value. As a result
2292 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2293 * 24 in order to generate a larger value for synchronization.
2294 */
2295 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2296 /* disable system timer temporarily by setting bit 31 */
2297 wr32(E1000_TSAUXC, 0x80000000);
2298 wrfl();
2299
2300 /* Set registers so that rollover occurs soon to test this. */
2301 wr32(E1000_SYSTIMR, 0x00000000);
2302 wr32(E1000_SYSTIML, 0x80000000);
2303 wr32(E1000_SYSTIMH, 0x000000FF);
2304 wrfl();
2305
2306 /* enable system timer by clearing bit 31 */
2307 wr32(E1000_TSAUXC, 0x0);
2308 wrfl();
2309
2310 timecounter_init(&adapter->clock,
2311 &adapter->cycles,
2312 ktime_to_ns(ktime_get_real()));
2313 /*
2314 * Synchronize our NIC clock against system wall clock. NIC
2315 * time stamp reading requires ~3us per sample, each sample
2316 * was pretty stable even under load => only require 10
2317 * samples for each offset comparison.
2318 */
2319 memset(&adapter->compare, 0, sizeof(adapter->compare));
2320 adapter->compare.source = &adapter->clock;
2321 adapter->compare.target = ktime_get_real;
2322 adapter->compare.num_samples = 10;
2323 timecompare_update(&adapter->compare, 0);
2324 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00002325 case e1000_82576:
2326 /*
2327 * Initialize hardware timer: we keep it running just in case
2328 * that some program needs it later on.
2329 */
2330 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2331 adapter->cycles.read = igb_read_clock;
2332 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2333 adapter->cycles.mult = 1;
 2334	/*
2335 * Scale the NIC clock cycle by a large factor so that
2336 * relatively small clock corrections can be added or
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002337 * subtracted at each clock tick. The drawbacks of a large
Alexander Duyck115f4592009-11-12 18:37:00 +00002338 * factor are a) that the clock register overflows more quickly
2339 * (not such a big deal) and b) that the increment per tick has
2340 * to fit into 24 bits. As a result we need to use a shift of
2341 * 19 so we can fit a value of 16 into the TIMINCA register.
2342 */
2343 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2344 wr32(E1000_TIMINCA,
2345 (1 << E1000_TIMINCA_16NS_SHIFT) |
2346 (16 << IGB_82576_TSYNC_SHIFT));
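		/* Worked example for the shift above: the increment written
		 * is 16 << 19 = 0x800000, the largest power of two that still
		 * fits the 24-bit TIMINCA increment field. */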
2347
2348 /* Set registers so that rollover occurs soon to test this. */
2349 wr32(E1000_SYSTIML, 0x00000000);
2350 wr32(E1000_SYSTIMH, 0xFF800000);
2351 wrfl();
2352
2353 timecounter_init(&adapter->clock,
2354 &adapter->cycles,
2355 ktime_to_ns(ktime_get_real()));
2356 /*
2357 * Synchronize our NIC clock against system wall clock. NIC
2358 * time stamp reading requires ~3us per sample, each sample
2359 * was pretty stable even under load => only require 10
2360 * samples for each offset comparison.
2361 */
2362 memset(&adapter->compare, 0, sizeof(adapter->compare));
2363 adapter->compare.source = &adapter->clock;
2364 adapter->compare.target = ktime_get_real;
2365 adapter->compare.num_samples = 10;
2366 timecompare_update(&adapter->compare, 0);
2367 break;
2368 case e1000_82575:
2369 /* 82575 does not support timesync */
2370 default:
2371 break;
2372 }
2373
2374}
2375
Alexander Duycka6b623e2009-10-27 23:47:53 +00002376/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002377 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2378 * @adapter: board private structure to initialize
2379 *
2380 * igb_sw_init initializes the Adapter private data structure.
2381 * Fields are initialized based on PCI device information and
2382 * OS network device settings (MTU size).
2383 **/
2384static int __devinit igb_sw_init(struct igb_adapter *adapter)
2385{
2386 struct e1000_hw *hw = &adapter->hw;
2387 struct net_device *netdev = adapter->netdev;
2388 struct pci_dev *pdev = adapter->pdev;
2389
2390 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2391
Alexander Duyck13fde972011-10-05 13:35:24 +00002392 /* set default ring sizes */
Alexander Duyck68fd9912008-11-20 00:48:10 -08002393 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2394 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck13fde972011-10-05 13:35:24 +00002395
2396 /* set default ITR values */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002397 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2398 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2399
Alexander Duyck13fde972011-10-05 13:35:24 +00002400 /* set default work limits */
2401 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2402
Alexander Duyck153285f2011-08-26 07:43:32 +00002403 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2404 VLAN_HLEN;
Auke Kok9d5c8242008-01-24 02:22:38 -08002405 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2406
Eric Dumazet12dcd862010-10-15 17:27:10 +00002407 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002408#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002409 switch (hw->mac.type) {
2410 case e1000_82576:
2411 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002412 if (max_vfs > 7) {
2413 dev_warn(&pdev->dev,
2414 "Maximum of 7 VFs per PF, using max\n");
2415 adapter->vfs_allocated_count = 7;
2416 } else
2417 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002418 break;
2419 default:
2420 break;
2421 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002422#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00002423 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
Williams, Mitch A665c8c82011-06-07 14:22:57 -07002424 /* i350 cannot do RSS and SR-IOV at the same time */
2425 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2426 adapter->rss_queues = 1;
Alexander Duycka99955f2009-11-12 18:37:19 +00002427
2428 /*
2429 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
2430 * then we should combine the queues into a queue pair in order to
2431 * conserve interrupts due to limited supply
2432 */
2433 if ((adapter->rss_queues > 4) ||
2434 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2435 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2436
Alexander Duycka6b623e2009-10-27 23:47:53 +00002437 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002438 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002439 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2440 return -ENOMEM;
2441 }
2442
Alexander Duycka6b623e2009-10-27 23:47:53 +00002443 igb_probe_vfs(adapter);
2444
Auke Kok9d5c8242008-01-24 02:22:38 -08002445 /* Explicitly disable IRQ since the NIC can be in any state. */
2446 igb_irq_disable(adapter);
2447
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002448 if (hw->mac.type == e1000_i350)
2449 adapter->flags &= ~IGB_FLAG_DMAC;
2450
Auke Kok9d5c8242008-01-24 02:22:38 -08002451 set_bit(__IGB_DOWN, &adapter->state);
2452 return 0;
2453}
2454
2455/**
2456 * igb_open - Called when a network interface is made active
2457 * @netdev: network interface device structure
2458 *
2459 * Returns 0 on success, negative value on failure
2460 *
2461 * The open entry point is called when a network interface is made
2462 * active by the system (IFF_UP). At this point all resources needed
2463 * for transmit and receive operations are allocated, the interrupt
2464 * handler is registered with the OS, the watchdog timer is started,
2465 * and the stack is notified that the interface is ready.
2466 **/
2467static int igb_open(struct net_device *netdev)
2468{
2469 struct igb_adapter *adapter = netdev_priv(netdev);
2470 struct e1000_hw *hw = &adapter->hw;
2471 int err;
2472 int i;
2473
2474 /* disallow open during test */
2475 if (test_bit(__IGB_TESTING, &adapter->state))
2476 return -EBUSY;
2477
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002478 netif_carrier_off(netdev);
2479
Auke Kok9d5c8242008-01-24 02:22:38 -08002480 /* allocate transmit descriptors */
2481 err = igb_setup_all_tx_resources(adapter);
2482 if (err)
2483 goto err_setup_tx;
2484
2485 /* allocate receive descriptors */
2486 err = igb_setup_all_rx_resources(adapter);
2487 if (err)
2488 goto err_setup_rx;
2489
Nick Nunley88a268c2010-02-17 01:01:59 +00002490 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002491
Auke Kok9d5c8242008-01-24 02:22:38 -08002492 /* before we allocate an interrupt, we must be ready to handle it.
2493 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 2494	 * as soon as we call request_irq, so we have to set up our
 2495	 * clean_rx handler before we do so. */
2496 igb_configure(adapter);
2497
2498 err = igb_request_irq(adapter);
2499 if (err)
2500 goto err_req_irq;
2501
2502 /* From here on the code is the same as igb_up() */
2503 clear_bit(__IGB_DOWN, &adapter->state);
2504
Alexander Duyck047e0032009-10-27 15:49:27 +00002505 for (i = 0; i < adapter->num_q_vectors; i++) {
2506 struct igb_q_vector *q_vector = adapter->q_vector[i];
2507 napi_enable(&q_vector->napi);
2508 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002509
2510 /* Clear any pending interrupts. */
2511 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002512
2513 igb_irq_enable(adapter);
2514
Alexander Duyckd4960302009-10-27 15:53:45 +00002515 /* notify VFs that reset has been completed */
2516 if (adapter->vfs_allocated_count) {
2517 u32 reg_data = rd32(E1000_CTRL_EXT);
2518 reg_data |= E1000_CTRL_EXT_PFRSTD;
2519 wr32(E1000_CTRL_EXT, reg_data);
2520 }
2521
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002522 netif_tx_start_all_queues(netdev);
2523
Alexander Duyck25568a52009-10-27 23:49:59 +00002524 /* start the watchdog. */
2525 hw->mac.get_link_status = 1;
2526 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002527
2528 return 0;
2529
2530err_req_irq:
2531 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002532 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002533 igb_free_all_rx_resources(adapter);
2534err_setup_rx:
2535 igb_free_all_tx_resources(adapter);
2536err_setup_tx:
2537 igb_reset(adapter);
2538
2539 return err;
2540}
2541
2542/**
2543 * igb_close - Disables a network interface
2544 * @netdev: network interface device structure
2545 *
2546 * Returns 0, this is not allowed to fail
2547 *
2548 * The close entry point is called when an interface is de-activated
2549 * by the OS. The hardware is still under the driver's control, but
2550 * needs to be disabled. A global MAC reset is issued to stop the
2551 * hardware, and all transmit and receive resources are freed.
2552 **/
2553static int igb_close(struct net_device *netdev)
2554{
2555 struct igb_adapter *adapter = netdev_priv(netdev);
2556
2557 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2558 igb_down(adapter);
2559
2560 igb_free_irq(adapter);
2561
2562 igb_free_all_tx_resources(adapter);
2563 igb_free_all_rx_resources(adapter);
2564
Auke Kok9d5c8242008-01-24 02:22:38 -08002565 return 0;
2566}
2567
2568/**
2569 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002570 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2571 *
2572 * Return 0 on success, negative on failure
2573 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002574int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002575{
Alexander Duyck59d71982010-04-27 13:09:25 +00002576 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002577 int size;
2578
2579 size = sizeof(struct igb_buffer) * tx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00002580 tx_ring->buffer_info = vzalloc(size);
Auke Kok9d5c8242008-01-24 02:22:38 -08002581 if (!tx_ring->buffer_info)
2582 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002583
2584 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002585 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002586 tx_ring->size = ALIGN(tx_ring->size, 4096);
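	/* e.g. assuming the 256-descriptor default, 256 * 16 bytes is exactly
	 * 4096 bytes, so the ALIGN above is a no-op in that case */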
2587
Alexander Duyck59d71982010-04-27 13:09:25 +00002588 tx_ring->desc = dma_alloc_coherent(dev,
2589 tx_ring->size,
2590 &tx_ring->dma,
2591 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002592
2593 if (!tx_ring->desc)
2594 goto err;
2595
Auke Kok9d5c8242008-01-24 02:22:38 -08002596 tx_ring->next_to_use = 0;
2597 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002598 return 0;
2599
2600err:
2601 vfree(tx_ring->buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002602 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002603 "Unable to allocate memory for the transmit descriptor ring\n");
2604 return -ENOMEM;
2605}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
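
/*
 * Layout note (illustrative): TXDCTL packs the prefetch, host and
 * write-back thresholds into one register eight bits apart,
 *
 *	txdctl = PTHRESH | (HTHRESH << 8) | (WTHRESH << 16);
 *
 * so thresholds of, say, 8/1/16 would yield 0x02100108 once
 * E1000_TXDCTL_QUEUE_ENABLE (bit 25) is OR'd in.
 */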

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}
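
#if 0	/* Illustrative sketch only -- not built. Mirrors the RETA fill
	 * above for the 82576-with-VFs case (num_rx_queues = 2, shift = 3),
	 * where every redirection-table dword works out to 0x08000800. */
static void igb_reta_example(void)
{
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	u32 j;

	for (j = 0; j < 4; j++)
		reta.bytes[j & 3] = (j % 2) << 3;	/* 0x00, 0x08, ... */

	BUG_ON(reta.dword != 0x08000800);	/* on little-endian */
}
#endif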

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!! For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/*
		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
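
/*
 * Behaviour note (illustrative): in SR-IOV/VMDq operation the global
 * RLPML is deliberately opened up to the maximum jumbo size and each
 * pool's VMOLR.RLPML, rewritten by igb_set_vf_rlpml() above, becomes
 * the effective per-pool limit; enabling VLANs for a VF, for example,
 * just bumps that pool's limit by VLAN_TAG_SIZE.
 */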

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer, so do nothing
	 * if we are on older hardware.
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	if (hw->mac.type == e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
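
/*
 * SRRCTL sketch (illustrative, assuming 4K pages and a 512-byte
 * IGB_RX_HDR_LEN): the header buffer size is programmed in 64-byte
 * units and the packet buffer size in 1KB units, so PAGE_SIZE / 2 =
 * 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT selects a 2KB packet buffer;
 * header-split-always mode then DMAs headers into the small buffer
 * and payload into the half-page buffer on every packet.
 */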

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
				    struct igb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_buffer *buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}
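
/*
 * Layout sketch (illustrative, example addresses): mta_list is a flat
 * array of back-to-back 6-byte MAC addresses, e.g. for two entries
 *
 *	mta_list[0..5]  = 01:00:5e:00:00:01
 *	mta_list[6..11] = 01:00:5e:00:00:fb
 *
 * and igb_update_mc_addr_list() hashes each ETH_ALEN-sized slot into
 * the MTA in turn.
 */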

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes. Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}
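
/*
 * Bit-layout note (illustrative): WVBR reports spoof events in two
 * banks IGB_STAGGERED_QUEUE_OFFSET (8) bits apart, one bit per VF per
 * bank. For VF 2 the loop above tests bit 2 and bit 10, and clears
 * both together once the warning has been logged.
 */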

/*
 * Need to wait a few seconds after link up to get diagnostic information
 * from the phy
 */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt. get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status messages must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
				(ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
				printk(KERN_INFO "igb: %s The network adapter "
				       "link speed was downshifted "
				       "because it overheated.\n",
				       netdev->name);
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
				printk(KERN_ERR "igb: %s The network adapter "
				       "was stopped because it "
				       "overheated.\n",
				       netdev->name);
			}

			/* Link status messages must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		tx_ring->detect_tx_hung = true;
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
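
/*
 * Units note (illustrative): per the "aka" comments below, the ITR
 * values used by this driver work out to roughly 0.25 usec per tick,
 * so the three ranges map to interrupt ceilings of about 70,000
 * ints/s (56 ticks), 20,000 ints/s (196 ticks) and 4,000 ints/s
 * (980 ticks) respectively.
 */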

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size. This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings. The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 *       receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *ring;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 976 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	ring = q_vector->rx_ring;
	if (ring) {
		packets = ACCESS_ONCE(ring->total_packets);

		if (packets)
			avg_wire_size = ring->total_bytes / packets;
	}

	ring = q_vector->tx_ring;
	if (ring) {
		packets = ACCESS_ONCE(ring->total_packets);

		if (packets)
			avg_wire_size = max_t(u32, avg_wire_size,
					      ring->total_bytes / packets);
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* when in itr mode 3 do not exceed 20K ints/sec */
	if (adapter->rx_itr_setting == 3 && new_val < 196)
		new_val = 196;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
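
/*
 * Worked example (illustrative): a queue averaging 1000-byte frames
 * gets avg_wire_size = 1000 + 24 = 1024, lands in the mid-size boost
 * band, and is assigned new_val = 1024 / 3 = 341 ticks -- roughly
 * 85 usec between interrupts, or ~11,700 ints/s at 0.25 usec per tick.
 */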

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt. The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern. Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE: These calculations are only valid when operating in a single-
 *            queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if (packets > 35) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
					 adapter->rx_itr,
					 q_vector->rx_ring->total_packets,
					 q_vector->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
					 adapter->tx_itr,
					 q_vector->tx_ring->total_packets,
					 q_vector->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	q_vector->rx_ring->total_bytes = 0;
	q_vector->rx_ring->total_packets = 0;
	q_vector->tx_ring->total_bytes = 0;
	q_vector->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) :
			  new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts. Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
3952
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16
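/*
 * The VLAN tag rides in the top 16 bits of tx_flags: e.g. a tag of
 * 0x0123 from vlan_tx_tag_get() is stored as 0x01230000 once shifted
 * by IGB_TX_FLAGS_VLAN_SHIFT in igb_xmit_frame_ring() below.
 */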

static inline int igb_tso(struct igb_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx;
	u8 l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

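	/*
	 * The IP length fields are now zeroed and the TCP checksum is
	 * pre-seeded with just the pseudo-header sum (computed with a
	 * zero length), so the hardware can fill in the real length and
	 * checksum for each segment it carves out of the large send.
	 */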
	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

static inline bool igb_tx_csum(struct igb_ring *tx_ring,
			       struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct device *dev = tx_ring->dev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGB_TX_CTXTDESC(tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(dev,
						 "partial checksum but proto=%x!\n",
						 skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
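/*
 * 1 << 16 is 65536, so one data descriptor can carry just under 64KB;
 * igb_tx_map() below BUG()s if any single mapped chunk reaches that
 * limit.
 */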

static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
			     unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
	unsigned int hlen = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = hlen;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
		unsigned int len = frag->size;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buffer_info->dma))
			goto dma_error;
	}

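	/*
	 * bytecount below is what actually hits the wire: for a TSO frame
	 * the linear part (normally just the headers, hlen bytes) is
	 * replayed in front of every segment, so the (gso_segs - 1) extra
	 * header copies are added to skb->len, which counts them once.
	 */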
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
	/* multiply data chunks by size of headers */
	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
	tx_ring->buffer_info[i].gso_segs = gso_segs;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i = tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}

static inline void igb_tx_queue(struct igb_ring *tx_ring,
				u32 tx_flags, int count, u32 paylen,
				u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

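	/*
	 * PAYLEN is everything after the headers: the caller passes
	 * paylen = skb->len, so paylen - hdr_len is the L4 payload the
	 * hardware reports and, for TSO, segments.
	 */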
	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGB_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time; it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	int tso = 0, count;
	u32 tx_flags = 0;
	u16 first;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if it is 0 or less, a mapping
	 * error has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

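	/*
	 * max_frame is the on-wire size: the MTU plus 14 bytes of Ethernet
	 * header, 4 bytes of FCS and 4 bytes of VLAN tag, so the default
	 * MTU of 1500 maps to a 1522 byte frame. The 9238 byte standard
	 * jumbo limit below likewise corresponds to the 9216 MTU quoted
	 * in the error message.
	 */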
	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

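	/*
	 * Per-ring byte and packet counters are 64-bit and updated from
	 * softirq context; the u64_stats begin/retry pairs below re-read
	 * a ring's counters until they get a snapshot that was not torn
	 * by a concurrent writer (this matters on 32-bit machines).
	 */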
	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also an indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
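/*
 * Direct Cache Access: the chipset is told which CPU services each
 * queue so the NIC's descriptor (and, on the rx side, header/payload)
 * writes can be steered toward that CPU's cache. igb_update_dca()
 * re-tags a queue whenever its q_vector runs on a different CPU than
 * last time.
 */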
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* if there are flags left unprocessed, they are likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN ID *before* clearing the entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled. Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

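			/*
			 * The first VLAN enabled on a VF grows its rx max
			 * packet length (RLPML) by 4 bytes to make room for
			 * the 802.1Q tag; the remove path below shrinks it
			 * again once the last VLAN is gone.
			 */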
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

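/*
 * A VLAN ID is 12 bits (0-4095) and the 802.1p priority is 3 bits (0-7),
 * hence the range checks below; the two are then combined into the port
 * VLAN tag as vlan | (qos << VLAN_PRIO_SHIFT).
 */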
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

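/*
 * VF MAC filters are handed out from the top of the RAR table down:
 * VF 0 gets entry rar_entry_count - 1, VF 1 the next one below it, and
 * so on, leaving the low entries free for PF use.
 */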
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

5228static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5229{
Greg Rosede42edd2010-07-01 13:39:23 +00005230 /*
5231 * The VF MAC Address is stored in a packed array of bytes
5232 * starting at the second 32 bit word of the msg array
5233 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005234	unsigned char *addr = (unsigned char *)&msg[1];
5235 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005236
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005237 if (is_valid_ether_addr(addr))
5238 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005239
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005240 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005241}
5242
5243static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5244{
5245 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005246 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005247 u32 msg = E1000_VT_MSGTYPE_NACK;
5248
5249 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005250 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5251 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005252 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005253 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005254 }
5255}
5256
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005257static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005258{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005259 struct pci_dev *pdev = adapter->pdev;
5260 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005261 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005262 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005263 s32 retval;
5264
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005265 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005266
Alexander Duyckfef45f42009-12-11 22:57:34 -08005267 if (retval) {
 5268		/* if receive failed, revoke VF CTS status and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005269 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005270 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5271 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5272 return;
5273 goto out;
5274 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005275
5276 /* this is a message we already processed, do nothing */
5277 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005278 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005279
5280 /*
5281 * until the vf completes a reset it should not be
5282 * allowed to start any configuration.
5283 */
5284
5285 if (msgbuf[0] == E1000_VF_RESET) {
5286 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005287 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005288 }
5289
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005290 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005291 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5292 return;
5293 retval = -1;
5294 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005295 }
5296
5297 switch ((msgbuf[0] & 0xFFFF)) {
5298 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005299 retval = -EINVAL;
5300 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5301 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5302 else
5303 dev_warn(&pdev->dev,
5304 "VF %d attempted to override administratively "
5305 "set MAC address\nReload the VF driver to "
5306 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005307 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005308 case E1000_VF_SET_PROMISC:
5309 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5310 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005311 case E1000_VF_SET_MULTICAST:
5312 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5313 break;
5314 case E1000_VF_SET_LPE:
5315 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5316 break;
5317 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005318 retval = -1;
5319 if (vf_data->pf_vlan)
5320 dev_warn(&pdev->dev,
5321 "VF %d attempted to override administratively "
5322 "set VLAN tag\nReload the VF driver to "
5323 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005324 else
5325 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005326 break;
5327 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005328 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005329 retval = -1;
5330 break;
5331 }
5332
Alexander Duyckfef45f42009-12-11 22:57:34 -08005333 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5334out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005335 /* notify the VF of the results of what it sent us */
5336 if (retval)
5337 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5338 else
5339 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5340
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005341 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005342}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005343
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005344static void igb_msg_task(struct igb_adapter *adapter)
5345{
5346 struct e1000_hw *hw = &adapter->hw;
5347 u32 vf;
5348
5349 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5350 /* process any reset requests */
5351 if (!igb_check_for_rst(hw, vf))
5352 igb_vf_reset_event(adapter, vf);
5353
5354 /* process any messages pending */
5355 if (!igb_check_for_msg(hw, vf))
5356 igb_rcv_msg_from_vf(adapter, vf);
5357
5358 /* process any acks */
5359 if (!igb_check_for_ack(hw, vf))
5360 igb_rcv_ack_from_vf(adapter, vf);
5361 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005362}
5363
Auke Kok9d5c8242008-01-24 02:22:38 -08005364/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005365 * igb_set_uta - Set unicast filter table address
5366 * @adapter: board private structure
5367 *
5368 * The unicast table address is a register array of 32-bit registers.
 5369 * The table is meant to be used in a way similar to how the MTA is used;
 5370 * however, due to certain limitations in the hardware, it is necessary to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005371 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 5372 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
Alexander Duyck68d480c2009-10-05 06:33:08 +00005373 **/
5374static void igb_set_uta(struct igb_adapter *adapter)
5375{
5376 struct e1000_hw *hw = &adapter->hw;
5377 int i;
5378
5379 /* The UTA table only exists on 82576 hardware and newer */
5380 if (hw->mac.type < e1000_82576)
5381 return;
5382
5383 /* we only need to do this if VMDq is enabled */
5384 if (!adapter->vfs_allocated_count)
5385 return;
5386
5387 for (i = 0; i < hw->mac.uta_reg_count; i++)
5388 array_wr32(E1000_UTA, i, ~0);
5389}
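
/*
 * To illustrate the effect: assuming a uta_reg_count of 128 (the exact
 * count is hardware dependent), the loop above sets all 128 * 32 = 4096
 * hash bits, so every unicast destination "hits" the table and
 * acceptance is then decided purely by each pool's VMOLR.ROPE bit.
 */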
5390
5391/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005392 * igb_intr_msi - Interrupt Handler
5393 * @irq: interrupt number
5394 * @data: pointer to a network interface device structure
5395 **/
5396static irqreturn_t igb_intr_msi(int irq, void *data)
5397{
Alexander Duyck047e0032009-10-27 15:49:27 +00005398 struct igb_adapter *adapter = data;
5399 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005400 struct e1000_hw *hw = &adapter->hw;
5401 /* read ICR disables interrupts using IAM */
5402 u32 icr = rd32(E1000_ICR);
5403
Alexander Duyck047e0032009-10-27 15:49:27 +00005404 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005405
Alexander Duyck7f081d42010-01-07 17:41:00 +00005406 if (icr & E1000_ICR_DRSTA)
5407 schedule_work(&adapter->reset_task);
5408
Alexander Duyck047e0032009-10-27 15:49:27 +00005409 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005410 /* HW is reporting DMA is out of sync */
5411 adapter->stats.doosync++;
5412 }
5413
Auke Kok9d5c8242008-01-24 02:22:38 -08005414 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5415 hw->mac.get_link_status = 1;
5416 if (!test_bit(__IGB_DOWN, &adapter->state))
5417 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5418 }
5419
Alexander Duyck047e0032009-10-27 15:49:27 +00005420 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005421
5422 return IRQ_HANDLED;
5423}
5424
5425/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005426 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005427 * @irq: interrupt number
5428 * @data: pointer to a network interface device structure
5429 **/
5430static irqreturn_t igb_intr(int irq, void *data)
5431{
Alexander Duyck047e0032009-10-27 15:49:27 +00005432 struct igb_adapter *adapter = data;
5433 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005434 struct e1000_hw *hw = &adapter->hw;
5435 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5436 * need for the IMC write */
5437 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005438 if (!icr)
5439 return IRQ_NONE; /* Not our interrupt */
5440
Alexander Duyck047e0032009-10-27 15:49:27 +00005441 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005442
5443 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5444 * not set, then the adapter didn't send an interrupt */
5445 if (!(icr & E1000_ICR_INT_ASSERTED))
5446 return IRQ_NONE;
5447
Alexander Duyck7f081d42010-01-07 17:41:00 +00005448 if (icr & E1000_ICR_DRSTA)
5449 schedule_work(&adapter->reset_task);
5450
Alexander Duyck047e0032009-10-27 15:49:27 +00005451 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005452 /* HW is reporting DMA is out of sync */
5453 adapter->stats.doosync++;
5454 }
5455
Auke Kok9d5c8242008-01-24 02:22:38 -08005456 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5457 hw->mac.get_link_status = 1;
5458 /* guard against interrupt when we're going down */
5459 if (!test_bit(__IGB_DOWN, &adapter->state))
5460 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5461 }
5462
Alexander Duyck047e0032009-10-27 15:49:27 +00005463 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005464
5465 return IRQ_HANDLED;
5466}
5467
Alexander Duyck047e0032009-10-27 15:49:27 +00005468static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005469{
Alexander Duyck047e0032009-10-27 15:49:27 +00005470 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005471 struct e1000_hw *hw = &adapter->hw;
5472
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00005473 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
5474 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
Alexander Duyck047e0032009-10-27 15:49:27 +00005475 if (!adapter->msix_entries)
Alexander Duyck46544252009-02-19 20:39:04 -08005476 igb_set_itr(adapter);
5477 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005478 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005479 }
5480
5481 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5482 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005483 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005484 else
5485 igb_irq_enable(adapter);
5486 }
5487}
5488
Auke Kok9d5c8242008-01-24 02:22:38 -08005489/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005490 * igb_poll - NAPI Rx polling callback
5491 * @napi: napi polling structure
5492 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005493 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005494static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005495{
Alexander Duyck047e0032009-10-27 15:49:27 +00005496 struct igb_q_vector *q_vector = container_of(napi,
5497 struct igb_q_vector,
5498 napi);
Alexander Duyck16eb8812011-08-26 07:43:54 +00005499 bool clean_complete = true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005500
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005501#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005502 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5503 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005504#endif
Alexander Duyck047e0032009-10-27 15:49:27 +00005505 if (q_vector->tx_ring)
Alexander Duyck13fde972011-10-05 13:35:24 +00005506 clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005507
Alexander Duyck047e0032009-10-27 15:49:27 +00005508 if (q_vector->rx_ring)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005509 clean_complete &= igb_clean_rx_irq(q_vector, budget);
Alexander Duyck047e0032009-10-27 15:49:27 +00005510
Alexander Duyck16eb8812011-08-26 07:43:54 +00005511 /* If all work not completed, return budget and keep polling */
5512 if (!clean_complete)
5513 return budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005514
Alexander Duyck46544252009-02-19 20:39:04 -08005515	/* all work done, exit the polling mode */
Alexander Duyck16eb8812011-08-26 07:43:54 +00005516 napi_complete(napi);
5517 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005518
Alexander Duyck16eb8812011-08-26 07:43:54 +00005519 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005520}
Al Viro6d8126f2008-03-16 22:23:24 +00005521
Auke Kok9d5c8242008-01-24 02:22:38 -08005522/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005523 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005524 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005525 * @shhwtstamps: timestamp structure to update
5526 * @regval: unsigned 64bit system time value.
5527 *
5528 * We need to convert the system time value stored in the RX/TXSTMP registers
5529 * into a hwtstamp which can be used by the upper level timestamping functions
5530 */
5531static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5532 struct skb_shared_hwtstamps *shhwtstamps,
5533 u64 regval)
5534{
5535 u64 ns;
5536
Alexander Duyck55cac242009-11-19 12:42:21 +00005537 /*
 5538	 * The 82580 starts with 1 ns at bit 0 in RX/TXSTMPL; shift this up by
 5539	 * 24 bits to match the clock shift we set up earlier.
5540 */
5541 if (adapter->hw.mac.type == e1000_82580)
5542 regval <<= IGB_82580_TSYNC_SHIFT;
5543
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005544 ns = timecounter_cyc2time(&adapter->clock, regval);
5545 timecompare_update(&adapter->compare, ns);
5546 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5547 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5548 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5549}
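
/*
 * Worked example for the 82580 branch, assuming IGB_82580_TSYNC_SHIFT
 * is 24 and the cyclecounter registered at init uses mult == 1 with the
 * same shift: a raw stamp of 4096 (4096 ns, since bit 0 weighs 1 ns)
 * becomes 4096 << 24 above, and timecounter_cyc2time() shifts those 24
 * bits back out, yielding the original 4096 ns on the ns timeline.
 */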
5550
5551/**
5552 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5553 * @q_vector: pointer to q_vector containing needed info
Nick Nunley28739572010-05-04 21:58:07 +00005554 * @buffer_info: pointer to the igb_buffer structure being cleaned
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005555 *
5556 * If we were asked to do hardware stamping and such a time stamp is
 5557 * available, then it must have been for this skb here because we allow
 5558 * only one such packet into the queue.
5559 */
Nick Nunley28739572010-05-04 21:58:07 +00005560static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005561{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005562 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005563 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005564 struct skb_shared_hwtstamps shhwtstamps;
5565 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005566
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005567 /* if skb does not support hw timestamp or TX stamp not valid exit */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005568 if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005569 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5570 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005571
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005572 regval = rd32(E1000_TXSTMPL);
5573 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5574
5575 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005576 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005577}
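
/*
 * For completeness, a sketch of how user space arms the path above
 * (hedged; constants from linux/net_tstamp.h): once SIOCSHWTSTAMP has
 * enabled hardware stamping, a socket requests per-packet TX stamps
 * with SO_TIMESTAMPING and reads them back from the socket error queue.
 *
 *	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
 *		    SOF_TIMESTAMPING_RAW_HARDWARE;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
 */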
5578
5579/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005580 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005581 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005582 * returns true if ring is completely cleaned
5583 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005584static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005585{
Alexander Duyck047e0032009-10-27 15:49:27 +00005586 struct igb_adapter *adapter = q_vector->adapter;
5587 struct igb_ring *tx_ring = q_vector->tx_ring;
Alexander Duyck13fde972011-10-05 13:35:24 +00005588 struct igb_buffer *tx_buffer;
5589 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005590 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck13fde972011-10-05 13:35:24 +00005591 unsigned int budget = q_vector->tx_work_limit;
5592 u16 i = tx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005593
Alexander Duyck13fde972011-10-05 13:35:24 +00005594 if (test_bit(__IGB_DOWN, &adapter->state))
5595 return true;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005596
Alexander Duyck13fde972011-10-05 13:35:24 +00005597 tx_buffer = &tx_ring->buffer_info[i];
5598 tx_desc = IGB_TX_DESC(tx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005599
Alexander Duyck13fde972011-10-05 13:35:24 +00005600 for (; budget; budget--) {
5601 u16 eop = tx_buffer->next_to_watch;
5602 union e1000_adv_tx_desc *eop_desc;
5603
5604 eop_desc = IGB_TX_DESC(tx_ring, eop);
5605
5606 /* if DD is not set pending work has not been completed */
5607 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5608 break;
5609
5610 /* prevent any other reads prior to eop_desc being verified */
5611 rmb();
5612
5613 do {
5614 tx_desc->wb.status = 0;
5615 if (likely(tx_desc == eop_desc)) {
5616 eop_desc = NULL;
5617
5618 total_bytes += tx_buffer->bytecount;
5619 total_packets += tx_buffer->gso_segs;
5620 igb_tx_hwtstamp(q_vector, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005621 }
5622
Alexander Duyck13fde972011-10-05 13:35:24 +00005623 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
Auke Kok9d5c8242008-01-24 02:22:38 -08005624
Alexander Duyck13fde972011-10-05 13:35:24 +00005625 tx_buffer++;
5626 tx_desc++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005627 i++;
Alexander Duyck13fde972011-10-05 13:35:24 +00005628 if (unlikely(i == tx_ring->count)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005629 i = 0;
Alexander Duyck13fde972011-10-05 13:35:24 +00005630 tx_buffer = tx_ring->buffer_info;
5631 tx_desc = IGB_TX_DESC(tx_ring, 0);
5632 }
5633 } while (eop_desc);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005634 }
5635
Auke Kok9d5c8242008-01-24 02:22:38 -08005636 tx_ring->next_to_clean = i;
Alexander Duyck13fde972011-10-05 13:35:24 +00005637 u64_stats_update_begin(&tx_ring->tx_syncp);
5638 tx_ring->tx_stats.bytes += total_bytes;
5639 tx_ring->tx_stats.packets += total_packets;
5640 u64_stats_update_end(&tx_ring->tx_syncp);
5641 tx_ring->total_bytes += total_bytes;
5642 tx_ring->total_packets += total_packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08005643
5644 if (tx_ring->detect_tx_hung) {
Alexander Duyck13fde972011-10-05 13:35:24 +00005645 struct e1000_hw *hw = &adapter->hw;
5646 u16 eop = tx_ring->buffer_info[i].next_to_watch;
5647 union e1000_adv_tx_desc *eop_desc;
5648
5649 eop_desc = IGB_TX_DESC(tx_ring, eop);
5650
Auke Kok9d5c8242008-01-24 02:22:38 -08005651		/* Detect a transmit hang in hardware; this serializes the
 5652		 * check with the clearing of time_stamp and the movement of i */
5653 tx_ring->detect_tx_hung = false;
5654 if (tx_ring->buffer_info[i].time_stamp &&
5655 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005656 (adapter->tx_timeout_factor * HZ)) &&
5657 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005658
Auke Kok9d5c8242008-01-24 02:22:38 -08005659 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005660 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005661 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005662 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005663 " TDH <%x>\n"
5664 " TDT <%x>\n"
5665 " next_to_use <%x>\n"
5666 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005667 "buffer_info[next_to_clean]\n"
5668 " time_stamp <%lx>\n"
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005669 " next_to_watch <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005670 " jiffies <%lx>\n"
5671 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005672 tx_ring->queue_index,
Alexander Duyck238ac812011-08-26 07:43:48 +00005673 rd32(E1000_TDH(tx_ring->reg_idx)),
Alexander Duyckfce99e32009-10-27 15:51:27 +00005674 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005675 tx_ring->next_to_use,
5676 tx_ring->next_to_clean,
Alexander Duyckf7ba2052009-10-27 23:48:51 +00005677 tx_ring->buffer_info[eop].time_stamp,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005678 eop,
Auke Kok9d5c8242008-01-24 02:22:38 -08005679 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005680 eop_desc->wb.status);
Alexander Duyck13fde972011-10-05 13:35:24 +00005681 netif_stop_subqueue(tx_ring->netdev,
5682 tx_ring->queue_index);
5683
5684 /* we are about to reset, no point in enabling stuff */
5685 return true;
Auke Kok9d5c8242008-01-24 02:22:38 -08005686 }
5687 }
Alexander Duyck13fde972011-10-05 13:35:24 +00005688
5689 if (unlikely(total_packets &&
5690 netif_carrier_ok(tx_ring->netdev) &&
5691 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5692 /* Make sure that anybody stopping the queue after this
5693 * sees the new next_to_clean.
5694 */
5695 smp_mb();
5696 if (__netif_subqueue_stopped(tx_ring->netdev,
5697 tx_ring->queue_index) &&
5698 !(test_bit(__IGB_DOWN, &adapter->state))) {
5699 netif_wake_subqueue(tx_ring->netdev,
5700 tx_ring->queue_index);
5701
5702 u64_stats_update_begin(&tx_ring->tx_syncp);
5703 tx_ring->tx_stats.restart_queue++;
5704 u64_stats_update_end(&tx_ring->tx_syncp);
5705 }
5706 }
5707
5708 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005709}
5710
Alexander Duyckcd392f52011-08-26 07:43:59 +00005711static inline void igb_rx_checksum(struct igb_ring *ring,
5712 u32 status_err, struct sk_buff *skb)
Auke Kok9d5c8242008-01-24 02:22:38 -08005713{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005714 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005715
 5716	/* the Ignore Checksum bit is set, or checksum is disabled through ethtool */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005717 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5718 (status_err & E1000_RXD_STAT_IXSM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005719 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005720
Auke Kok9d5c8242008-01-24 02:22:38 -08005721 /* TCP/UDP checksum error bit is set */
5722 if (status_err &
5723 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005724 /*
 5725		 * work around an erratum with SCTP packets where the TCPE (aka
 5726		 * L4E) bit is set incorrectly on 64 byte (60 byte w/o CRC)
 5727		 * packets; let the stack check the crc32c instead
5728 */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005729 if ((skb->len == 60) &&
Eric Dumazet12dcd862010-10-15 17:27:10 +00005730 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
5731 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fcaa2009-10-27 15:52:27 +00005732 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005733 u64_stats_update_end(&ring->rx_syncp);
5734 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005735 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005736 return;
5737 }
5738 /* It must be a TCP or UDP packet with a valid checksum */
5739 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5740 skb->ip_summed = CHECKSUM_UNNECESSARY;
5741
Alexander Duyck59d71982010-04-27 13:09:25 +00005742 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005743}
5744
Nick Nunley757b77e2010-03-26 11:36:47 +00005745static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005746 struct sk_buff *skb)
5747{
5748 struct igb_adapter *adapter = q_vector->adapter;
5749 struct e1000_hw *hw = &adapter->hw;
5750 u64 regval;
5751
5752 /*
5753 * If this bit is set, then the RX registers contain the time stamp. No
5754 * other packet will be time stamped until we read these registers, so
5755 * read the registers to make them available again. Because only one
5756 * packet can be time stamped at a time, we know that the register
5757 * values must belong to this one here and therefore we don't need to
5758 * compare any of the additional attributes stored for it.
5759 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005760 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005761 * can turn into a skb_shared_hwtstamps.
5762 */
Nick Nunley757b77e2010-03-26 11:36:47 +00005763 if (staterr & E1000_RXDADV_STAT_TSIP) {
5764 u32 *stamp = (u32 *)skb->data;
5765 regval = le32_to_cpu(*(stamp + 2));
5766 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5767 skb_pull(skb, IGB_TS_HDR_LEN);
5768 } else {
 5769		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5770 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005771
Nick Nunley757b77e2010-03-26 11:36:47 +00005772 regval = rd32(E1000_RXSTMPL);
5773 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5774 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005775
5776 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5777}

Alexander Duyck44390ca2011-08-26 07:43:38 +00005778static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005779{
5780 /* HW will not DMA in data larger than the given buffer, even if it
5781 * parses the (NFS, of course) header to be larger. In that case, it
5782 * fills the header buffer and spills the rest into the page.
5783 */
5784 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5785 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck44390ca2011-08-26 07:43:38 +00005786 if (hlen > IGB_RX_HDR_LEN)
5787 hlen = IGB_RX_HDR_LEN;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005788 return hlen;
5789}
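
/*
 * Illustration: if IGB_RX_HDR_LEN is 512 (its exact value in igb.h is
 * an assumption here) and hdr_info claims a larger header, the pull is
 * clamped to 512 bytes and the rest of the frame arrives as the page
 * fragment handled in igb_clean_rx_irq() below.
 */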
5790
Alexander Duyckcd392f52011-08-26 07:43:59 +00005791static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005792{
Alexander Duyck047e0032009-10-27 15:49:27 +00005793 struct igb_ring *rx_ring = q_vector->rx_ring;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005794 union e1000_adv_rx_desc *rx_desc;
5795 const int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005796 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005797 u32 staterr;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005798 u16 cleaned_count = igb_desc_unused(rx_ring);
5799 u16 i = rx_ring->next_to_clean;
Auke Kok9d5c8242008-01-24 02:22:38 -08005800
Alexander Duyck601369062011-08-26 07:44:05 +00005801 rx_desc = IGB_RX_DESC(rx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005802 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5803
5804 while (staterr & E1000_RXD_STAT_DD) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00005805 struct igb_buffer *buffer_info = &rx_ring->buffer_info[i];
5806 struct sk_buff *skb = buffer_info->skb;
5807 union e1000_adv_rx_desc *next_rxd;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005808
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005809 buffer_info->skb = NULL;
Alexander Duyck16eb8812011-08-26 07:43:54 +00005810 prefetch(skb->data);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005811
5812 i++;
5813 if (i == rx_ring->count)
5814 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005815
Alexander Duyck601369062011-08-26 07:44:05 +00005816 next_rxd = IGB_RX_DESC(rx_ring, i);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005817 prefetch(next_rxd);
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005818
Alexander Duyck16eb8812011-08-26 07:43:54 +00005819 /*
5820 * This memory barrier is needed to keep us from reading
5821 * any other fields out of the rx_desc until we know the
5822 * RXD_STAT_DD bit is set
5823 */
5824 rmb();
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005825
Alexander Duyck16eb8812011-08-26 07:43:54 +00005826 if (!skb_is_nonlinear(skb)) {
5827 __skb_put(skb, igb_get_hlen(rx_desc));
5828 dma_unmap_single(rx_ring->dev, buffer_info->dma,
Alexander Duyck44390ca2011-08-26 07:43:38 +00005829 IGB_RX_HDR_LEN,
Alexander Duyck59d71982010-04-27 13:09:25 +00005830 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00005831 buffer_info->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005832 }
5833
Alexander Duyck16eb8812011-08-26 07:43:54 +00005834 if (rx_desc->wb.upper.length) {
5835 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005836
Koki Sanagiaa913402010-04-27 01:01:19 +00005837 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005838 buffer_info->page,
5839 buffer_info->page_offset,
5840 length);
5841
Alexander Duyck16eb8812011-08-26 07:43:54 +00005842 skb->len += length;
5843 skb->data_len += length;
5844 skb->truesize += length;
5845
Alexander Duyckd1eff352009-11-12 18:38:35 +00005846 if ((page_count(buffer_info->page) != 1) ||
5847 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005848 buffer_info->page = NULL;
5849 else
5850 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08005851
Alexander Duyck16eb8812011-08-26 07:43:54 +00005852 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
5853 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5854 buffer_info->page_dma = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005855 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005856
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005857 if (!(staterr & E1000_RXD_STAT_EOP)) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00005858 struct igb_buffer *next_buffer;
5859 next_buffer = &rx_ring->buffer_info[i];
Alexander Duyckb2d56532008-11-20 00:47:34 -08005860 buffer_info->skb = next_buffer->skb;
5861 buffer_info->dma = next_buffer->dma;
5862 next_buffer->skb = skb;
5863 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005864 goto next_desc;
5865 }
Alexander Duyck44390ca2011-08-26 07:43:38 +00005866
Auke Kok9d5c8242008-01-24 02:22:38 -08005867 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
Alexander Duyck16eb8812011-08-26 07:43:54 +00005868 dev_kfree_skb_any(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005869 goto next_desc;
5870 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005871
Nick Nunley757b77e2010-03-26 11:36:47 +00005872 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5873 igb_rx_hwtstamp(q_vector, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005874 total_bytes += skb->len;
5875 total_packets++;
5876
Alexander Duyckcd392f52011-08-26 07:43:59 +00005877 igb_rx_checksum(rx_ring, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005878
Alexander Duyck16eb8812011-08-26 07:43:54 +00005879 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08005880
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00005881 if (staterr & E1000_RXD_STAT_VP) {
5882 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
Alexander Duyck047e0032009-10-27 15:49:27 +00005883
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00005884 __vlan_hwaccel_put_tag(skb, vid);
5885 }
5886 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005887
Alexander Duyck16eb8812011-08-26 07:43:54 +00005888 budget--;
Auke Kok9d5c8242008-01-24 02:22:38 -08005889next_desc:
Alexander Duyck16eb8812011-08-26 07:43:54 +00005890 if (!budget)
5891 break;
5892
5893 cleaned_count++;
Auke Kok9d5c8242008-01-24 02:22:38 -08005894 /* return some buffers to hardware, one at a time is too slow */
5895 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Alexander Duyckcd392f52011-08-26 07:43:59 +00005896 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005897 cleaned_count = 0;
5898 }
5899
5900 /* use prefetched values */
5901 rx_desc = next_rxd;
Auke Kok9d5c8242008-01-24 02:22:38 -08005902 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5903 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005904
Auke Kok9d5c8242008-01-24 02:22:38 -08005905 rx_ring->next_to_clean = i;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005906 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08005907 rx_ring->rx_stats.packets += total_packets;
5908 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005909 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyckc023cd82011-08-26 07:43:43 +00005910 rx_ring->total_packets += total_packets;
5911 rx_ring->total_bytes += total_bytes;
5912
5913 if (cleaned_count)
Alexander Duyckcd392f52011-08-26 07:43:59 +00005914 igb_alloc_rx_buffers(rx_ring, cleaned_count);
Alexander Duyckc023cd82011-08-26 07:43:43 +00005915
Alexander Duyck16eb8812011-08-26 07:43:54 +00005916 return !!budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005917}
5918
Alexander Duyckc023cd82011-08-26 07:43:43 +00005919static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
5920 struct igb_buffer *bi)
5921{
5922 struct sk_buff *skb = bi->skb;
5923 dma_addr_t dma = bi->dma;
5924
5925 if (dma)
5926 return true;
5927
5928 if (likely(!skb)) {
5929 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
5930 IGB_RX_HDR_LEN);
5931 bi->skb = skb;
5932 if (!skb) {
5933 rx_ring->rx_stats.alloc_failed++;
5934 return false;
5935 }
5936
5937 /* initialize skb for ring */
5938 skb_record_rx_queue(skb, rx_ring->queue_index);
5939 }
5940
5941 dma = dma_map_single(rx_ring->dev, skb->data,
5942 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
5943
5944 if (dma_mapping_error(rx_ring->dev, dma)) {
5945 rx_ring->rx_stats.alloc_failed++;
5946 return false;
5947 }
5948
5949 bi->dma = dma;
5950 return true;
5951}
5952
5953static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
5954 struct igb_buffer *bi)
5955{
5956 struct page *page = bi->page;
5957 dma_addr_t page_dma = bi->page_dma;
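	/*
	 * XOR with half the page size flips between the two halves of
	 * the page, so the half that may still be referenced by an skb
	 * is left alone while the other half is remapped for reuse.
	 */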
5958 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
5959
5960 if (page_dma)
5961 return true;
5962
5963 if (!page) {
5964 page = netdev_alloc_page(rx_ring->netdev);
5965 bi->page = page;
5966 if (unlikely(!page)) {
5967 rx_ring->rx_stats.alloc_failed++;
5968 return false;
5969 }
5970 }
5971
5972 page_dma = dma_map_page(rx_ring->dev, page,
5973 page_offset, PAGE_SIZE / 2,
5974 DMA_FROM_DEVICE);
5975
5976 if (dma_mapping_error(rx_ring->dev, page_dma)) {
5977 rx_ring->rx_stats.alloc_failed++;
5978 return false;
5979 }
5980
5981 bi->page_dma = page_dma;
5982 bi->page_offset = page_offset;
5983 return true;
5984}
5985
Auke Kok9d5c8242008-01-24 02:22:38 -08005986/**
Alexander Duyckcd392f52011-08-26 07:43:59 +00005987 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
Auke Kok9d5c8242008-01-24 02:22:38 -08005988 * @rx_ring: pointer to the ring to place buffers on
 * @cleaned_count: number of buffers to replace
5989 **/
Alexander Duyckcd392f52011-08-26 07:43:59 +00005990void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08005991{
Auke Kok9d5c8242008-01-24 02:22:38 -08005992 union e1000_adv_rx_desc *rx_desc;
Alexander Duyckc023cd82011-08-26 07:43:43 +00005993 struct igb_buffer *bi;
5994 u16 i = rx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08005995
Alexander Duyck601369062011-08-26 07:44:05 +00005996 rx_desc = IGB_RX_DESC(rx_ring, i);
Alexander Duyckc023cd82011-08-26 07:43:43 +00005997 bi = &rx_ring->buffer_info[i];
5998 i -= rx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005999
6000 while (cleaned_count--) {
Alexander Duyckc023cd82011-08-26 07:43:43 +00006001 if (!igb_alloc_mapped_skb(rx_ring, bi))
6002 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006003
Alexander Duyckc023cd82011-08-26 07:43:43 +00006004 /* Refresh the desc even if buffer_addrs didn't change
6005 * because each write-back erases this info. */
6006 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006007
Alexander Duyckc023cd82011-08-26 07:43:43 +00006008 if (!igb_alloc_mapped_page(rx_ring, bi))
6009 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08006010
Alexander Duyckc023cd82011-08-26 07:43:43 +00006011 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08006012
Alexander Duyckc023cd82011-08-26 07:43:43 +00006013 rx_desc++;
6014 bi++;
Auke Kok9d5c8242008-01-24 02:22:38 -08006015 i++;
Alexander Duyckc023cd82011-08-26 07:43:43 +00006016 if (unlikely(!i)) {
Alexander Duyck601369062011-08-26 07:44:05 +00006017 rx_desc = IGB_RX_DESC(rx_ring, 0);
Alexander Duyckc023cd82011-08-26 07:43:43 +00006018 bi = rx_ring->buffer_info;
6019 i -= rx_ring->count;
6020 }
6021
6022 /* clear the hdr_addr for the next_to_use descriptor */
6023 rx_desc->read.hdr_addr = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08006024 }
6025
Alexander Duyckc023cd82011-08-26 07:43:43 +00006026 i += rx_ring->count;
6027
Auke Kok9d5c8242008-01-24 02:22:38 -08006028 if (rx_ring->next_to_use != i) {
6029 rx_ring->next_to_use = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006030
6031 /* Force memory writes to complete before letting h/w
6032 * know there are new descriptors to fetch. (Only
6033 * applicable for weak-ordered memory model archs,
6034 * such as IA-64). */
6035 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00006036 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08006037 }
6038}
6039
6040/**
 6041 * igb_mii_ioctl - handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG)
 6042 * @netdev: network interface device structure
 6043 * @ifr: pointer to the user ioctl request
 6044 * @cmd: ioctl command
6045 **/
6046static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6047{
6048 struct igb_adapter *adapter = netdev_priv(netdev);
6049 struct mii_ioctl_data *data = if_mii(ifr);
6050
6051 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6052 return -EOPNOTSUPP;
6053
6054 switch (cmd) {
6055 case SIOCGMIIPHY:
6056 data->phy_id = adapter->hw.phy.addr;
6057 break;
6058 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08006059 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6060 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006061 return -EIO;
6062 break;
6063 case SIOCSMIIREG:
6064 default:
6065 return -EOPNOTSUPP;
6066 }
6067 return 0;
6068}
6069
6070/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006071 * igb_hwtstamp_ioctl - control hardware time stamping
 6072 * @netdev: network interface device structure
 6073 * @ifr: pointer to the user ioctl request carrying a hwtstamp_config
 6074 * @cmd: ioctl command
6075 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006076 * Outgoing time stamping can be enabled and disabled. Play nice and
 6077 * disable it when requested, although it shouldn't cause any overhead
6078 * when no packet needs it. At most one packet in the queue may be
6079 * marked for time stamping, otherwise it would be impossible to tell
6080 * for sure to which packet the hardware time stamp belongs.
6081 *
6082 * Incoming time stamping has to be configured via the hardware
6083 * filters. Not all combinations are supported, in particular event
6084 * type has to be specified. Matching the kind of event packet is
6085 * not supported, with the exception of "all V2 events regardless of
6086 * level 2 or 4".
6087 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006088 **/
6089static int igb_hwtstamp_ioctl(struct net_device *netdev,
6090 struct ifreq *ifr, int cmd)
6091{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006092 struct igb_adapter *adapter = netdev_priv(netdev);
6093 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006094 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006095 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6096 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006097 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006098 bool is_l4 = false;
6099 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006100 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006101
6102 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6103 return -EFAULT;
6104
6105 /* reserved for future extensions */
6106 if (config.flags)
6107 return -EINVAL;
6108
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006109 switch (config.tx_type) {
6110 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006111 tsync_tx_ctl = 0;
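		/* fall through - TX_OFF only clears the enable bit */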
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006112 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006113 break;
6114 default:
6115 return -ERANGE;
6116 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006117
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006118 switch (config.rx_filter) {
6119 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006120 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006121 break;
6122 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6123 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6124 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6125 case HWTSTAMP_FILTER_ALL:
6126 /*
6127 * register TSYNCRXCFG must be set, therefore it is not
6128 * possible to time stamp both Sync and Delay_Req messages
6129 * => fall back to time stamping all packets
6130 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006131 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006132 config.rx_filter = HWTSTAMP_FILTER_ALL;
6133 break;
6134 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006135 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006136 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006137 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006138 break;
6139 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006140 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006141 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006142 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006143 break;
6144 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6145 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006146 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006147 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006148 is_l2 = true;
6149 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006150 config.rx_filter = HWTSTAMP_FILTER_SOME;
6151 break;
6152 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6153 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006154 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006155 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006156 is_l2 = true;
6157 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006158 config.rx_filter = HWTSTAMP_FILTER_SOME;
6159 break;
6160 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6161 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6162 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006163 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006164 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006165 is_l2 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006166 break;
6167 default:
6168 return -ERANGE;
6169 }
6170
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006171 if (hw->mac.type == e1000_82575) {
 6172		if (tsync_rx_ctl || tsync_tx_ctl)
6173 return -EINVAL;
6174 return 0;
6175 }
6176
Nick Nunley757b77e2010-03-26 11:36:47 +00006177 /*
6178 * Per-packet timestamping only works if all packets are
6179 * timestamped, so enable timestamping in all packets as
6180 * long as one rx filter was configured.
6181 */
6182 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
6183 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6184 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6185 }
6186
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006187 /* enable/disable TX */
6188 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006189 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6190 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006191 wr32(E1000_TSYNCTXCTL, regval);
6192
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006193 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006194 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006195 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6196 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006197 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006198
6199 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006200 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6201
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006202 /* define ethertype filter for timestamped packets */
6203 if (is_l2)
6204 wr32(E1000_ETQF(3),
6205 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6206 E1000_ETQF_1588 | /* enable timestamping */
6207 ETH_P_1588)); /* 1588 eth protocol type */
6208 else
6209 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006210
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006211#define PTP_PORT 319
6212 /* L4 Queue Filter[3]: filter by destination port and protocol */
6213 if (is_l4) {
6214 u32 ftqf = (IPPROTO_UDP /* UDP */
6215 | E1000_FTQF_VF_BP /* VF not compared */
6216 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6217 | E1000_FTQF_MASK); /* mask all inputs */
6218 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006219
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006220 wr32(E1000_IMIR(3), htons(PTP_PORT));
6221 wr32(E1000_IMIREXT(3),
6222 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6223 if (hw->mac.type == e1000_82576) {
6224 /* enable source port check */
6225 wr32(E1000_SPQF(3), htons(PTP_PORT));
6226 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6227 }
6228 wr32(E1000_FTQF(3), ftqf);
6229 } else {
6230 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6231 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006232 wrfl();
6233
6234 adapter->hwtstamp_config = config;
6235
6236 /* clear TX/RX time stamp registers, just to be sure */
6237 regval = rd32(E1000_TXSTMPH);
6238 regval = rd32(E1000_RXSTMPH);
6239
6240 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6241 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006242}
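
/*
 * A hedged user-space sketch of driving the ioctl above (struct
 * hwtstamp_config comes from linux/net_tstamp.h; "eth0" and fd are
 * placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg reflects what the hardware actually does (several of
 * the filters above are widened to HWTSTAMP_FILTER_ALL or narrowed to
 * HWTSTAMP_FILTER_SOME), which is why the config is copied back out.
 */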
6243
6244/**
Auke Kok9d5c8242008-01-24 02:22:38 -08006245 * igb_ioctl - dispatch device-specific ioctls
 6246 * @netdev: network interface device structure
 6247 * @ifr: pointer to the user ioctl request
 6248 * @cmd: ioctl command
6249 **/
6250static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6251{
6252 switch (cmd) {
6253 case SIOCGMIIPHY:
6254 case SIOCGMIIREG:
6255 case SIOCSMIIREG:
6256 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006257 case SIOCSHWTSTAMP:
6258 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006259 default:
6260 return -EOPNOTSUPP;
6261 }
6262}
6263
Alexander Duyck009bc062009-07-23 18:08:35 +00006264s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6265{
6266 struct igb_adapter *adapter = hw->back;
6267 u16 cap_offset;
6268
Jon Masonbdaae042011-06-27 07:44:01 +00006269 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006270 if (!cap_offset)
6271 return -E1000_ERR_CONFIG;
6272
6273 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6274
6275 return 0;
6276}
6277
6278s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6279{
6280 struct igb_adapter *adapter = hw->back;
6281 u16 cap_offset;
6282
Jon Masonbdaae042011-06-27 07:44:01 +00006283 cap_offset = adapter->pdev->pcie_cap;
Alexander Duyck009bc062009-07-23 18:08:35 +00006284 if (!cap_offset)
6285 return -E1000_ERR_CONFIG;
6286
6287 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6288
6289 return 0;
6290}
6291
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006292static void igb_vlan_mode(struct net_device *netdev, u32 features)
Auke Kok9d5c8242008-01-24 02:22:38 -08006293{
6294 struct igb_adapter *adapter = netdev_priv(netdev);
6295 struct e1000_hw *hw = &adapter->hw;
6296 u32 ctrl, rctl;
6297
6298 igb_irq_disable(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006299
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006300 if (features & NETIF_F_HW_VLAN_RX) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006301 /* enable VLAN tag insert/strip */
6302 ctrl = rd32(E1000_CTRL);
6303 ctrl |= E1000_CTRL_VME;
6304 wr32(E1000_CTRL, ctrl);
6305
Alexander Duyck51466232009-10-27 23:47:35 +00006306 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006307 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006308 rctl &= ~E1000_RCTL_CFIEN;
6309 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006310 } else {
6311 /* disable VLAN tag insert/strip */
6312 ctrl = rd32(E1000_CTRL);
6313 ctrl &= ~E1000_CTRL_VME;
6314 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006315 }
6316
Alexander Duycke1739522009-02-19 20:39:44 -08006317 igb_rlpml_set(adapter);
6318
Auke Kok9d5c8242008-01-24 02:22:38 -08006319 if (!test_bit(__IGB_DOWN, &adapter->state))
6320 igb_irq_enable(adapter);
6321}
6322
6323static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6324{
6325 struct igb_adapter *adapter = netdev_priv(netdev);
6326 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006327 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006328
Alexander Duyck51466232009-10-27 23:47:35 +00006329 /* attempt to add filter to vlvf array */
6330 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006331
Alexander Duyck51466232009-10-27 23:47:35 +00006332 /* add the filter since PF can receive vlans w/o entry in vlvf */
6333 igb_vfta_set(hw, vid, true);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006334
6335 set_bit(vid, adapter->active_vlans);
Auke Kok9d5c8242008-01-24 02:22:38 -08006336}
6337
6338static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
6339{
6340 struct igb_adapter *adapter = netdev_priv(netdev);
6341 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006342 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006343 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006344
6345 igb_irq_disable(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006346
6347 if (!test_bit(__IGB_DOWN, &adapter->state))
6348 igb_irq_enable(adapter);
6349
Alexander Duyck51466232009-10-27 23:47:35 +00006350 /* remove vlan from VLVF table array */
6351 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006352
Alexander Duyck51466232009-10-27 23:47:35 +00006353 /* if vid was not present in VLVF just remove it from table */
6354 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006355 igb_vfta_set(hw, vid, false);
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006356
6357 clear_bit(vid, adapter->active_vlans);
Auke Kok9d5c8242008-01-24 02:22:38 -08006358}
6359
6360static void igb_restore_vlan(struct igb_adapter *adapter)
6361{
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006362 u16 vid;
Auke Kok9d5c8242008-01-24 02:22:38 -08006363
Jiri Pirkob2cb09b2011-07-21 03:27:27 +00006364 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6365 igb_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9d5c8242008-01-24 02:22:38 -08006366}
6367
David Decotigny14ad2512011-04-27 18:32:43 +00006368int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
Auke Kok9d5c8242008-01-24 02:22:38 -08006369{
Alexander Duyck090b1792009-10-27 23:51:55 +00006370 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08006371 struct e1000_mac_info *mac = &adapter->hw.mac;
6372
6373 mac->autoneg = 0;
6374
David Decotigny14ad2512011-04-27 18:32:43 +00006375 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6376 * for the switch() below to work */
6377 if ((spd & 1) || (dplx & ~1))
6378 goto err_inval;
6379
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006380	/* Fiber NICs only allow 1000 Mbps full duplex */
 6381	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
David Decotigny14ad2512011-04-27 18:32:43 +00006382	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
 6383		goto err_inval;
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006385
David Decotigny14ad2512011-04-27 18:32:43 +00006386 switch (spd + dplx) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006387 case SPEED_10 + DUPLEX_HALF:
6388 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6389 break;
6390 case SPEED_10 + DUPLEX_FULL:
6391 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6392 break;
6393 case SPEED_100 + DUPLEX_HALF:
6394 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6395 break;
6396 case SPEED_100 + DUPLEX_FULL:
6397 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6398 break;
6399 case SPEED_1000 + DUPLEX_FULL:
6400 mac->autoneg = 1;
6401 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6402 break;
6403 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6404 default:
David Decotigny14ad2512011-04-27 18:32:43 +00006405 goto err_inval;
Auke Kok9d5c8242008-01-24 02:22:38 -08006406 }
6407 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00006408
6409err_inval:
6410 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6411 return -EINVAL;
Auke Kok9d5c8242008-01-24 02:22:38 -08006412}
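
/*
 * Why the spd + dplx switch above is safe: SPEED_10/100/1000 are the
 * decimal values 10, 100 and 1000, and DUPLEX_HALF/DUPLEX_FULL are 0
 * and 1, so once the guard has rejected odd speeds and duplex values
 * other than 0/1, every valid combination sums to a distinct case
 * label (e.g. SPEED_100 + DUPLEX_FULL == 101).
 */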
6413
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006414static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
Auke Kok9d5c8242008-01-24 02:22:38 -08006415{
6416 struct net_device *netdev = pci_get_drvdata(pdev);
6417 struct igb_adapter *adapter = netdev_priv(netdev);
6418 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07006419 u32 ctrl, rctl, status;
Auke Kok9d5c8242008-01-24 02:22:38 -08006420 u32 wufc = adapter->wol;
6421#ifdef CONFIG_PM
6422 int retval = 0;
6423#endif
6424
6425 netif_device_detach(netdev);
6426
Alexander Duycka88f10e2008-07-08 15:13:38 -07006427 if (netif_running(netdev))
6428 igb_close(netdev);
6429
Alexander Duyck047e0032009-10-27 15:49:27 +00006430 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006431
6432#ifdef CONFIG_PM
6433 retval = pci_save_state(pdev);
6434 if (retval)
6435 return retval;
6436#endif
6437
6438 status = rd32(E1000_STATUS);
6439 if (status & E1000_STATUS_LU)
6440 wufc &= ~E1000_WUFC_LNKC;
6441
6442 if (wufc) {
6443 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006444 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006445
6446 /* turn on all-multi mode if wake on multicast is enabled */
6447 if (wufc & E1000_WUFC_MC) {
6448 rctl = rd32(E1000_RCTL);
6449 rctl |= E1000_RCTL_MPE;
6450 wr32(E1000_RCTL, rctl);
6451 }
6452
6453 ctrl = rd32(E1000_CTRL);
6454 /* advertise wake from D3Cold */
6455 #define E1000_CTRL_ADVD3WUC 0x00100000
6456 /* phy power management enable */
6457 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6458 ctrl |= E1000_CTRL_ADVD3WUC;
6459 wr32(E1000_CTRL, ctrl);
6460
Auke Kok9d5c8242008-01-24 02:22:38 -08006461 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00006462 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08006463
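	/* enable PME and program the wake-up filters selected above */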
6464 wr32(E1000_WUC, E1000_WUC_PME_EN);
6465 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08006466 } else {
6467 wr32(E1000_WUC, 0);
6468 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08006469 }
6470
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006471 *enable_wake = wufc || adapter->en_mng_pt;
6472 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00006473 igb_power_down_link(adapter);
6474 else
6475 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006476
6477 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6478 * would have already happened in close and is redundant. */
6479 igb_release_hw_control(adapter);
6480
6481 pci_disable_device(pdev);
6482
Auke Kok9d5c8242008-01-24 02:22:38 -08006483 return 0;
6484}
6485
6486#ifdef CONFIG_PM
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006487static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
6488{
6489 int retval;
6490 bool wake;
6491
6492 retval = __igb_shutdown(pdev, &wake);
6493 if (retval)
6494 return retval;
6495
6496 if (wake) {
6497 pci_prepare_to_sleep(pdev);
6498 } else {
6499 pci_wake_from_d3(pdev, false);
6500 pci_set_power_state(pdev, PCI_D3hot);
6501 }
6502
6503 return 0;
6504}
6505
Auke Kok9d5c8242008-01-24 02:22:38 -08006506static int igb_resume(struct pci_dev *pdev)
6507{
6508 struct net_device *netdev = pci_get_drvdata(pdev);
6509 struct igb_adapter *adapter = netdev_priv(netdev);
6510 struct e1000_hw *hw = &adapter->hw;
6511 int err;
6512
6513 pci_set_power_state(pdev, PCI_D0);
6514 pci_restore_state(pdev);
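	/* pci_restore_state() clears the saved-state flag, so save again
	 * here to keep a valid copy for a later suspend or error recovery */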
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006515 pci_save_state(pdev);
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006516
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006517 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006518 if (err) {
6519 dev_err(&pdev->dev,
6520 "igb: Cannot enable PCI device from suspend\n");
6521 return err;
6522 }
6523 pci_set_master(pdev);
6524
6525 pci_enable_wake(pdev, PCI_D3hot, 0);
6526 pci_enable_wake(pdev, PCI_D3cold, 0);
6527
Alexander Duyck047e0032009-10-27 15:49:27 +00006528 if (igb_init_interrupt_scheme(adapter)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07006529 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6530 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08006531 }
6532
Auke Kok9d5c8242008-01-24 02:22:38 -08006533 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00006534
6535 /* let the f/w know that the h/w is now under the control of the
6536 * driver. */
6537 igb_get_hw_control(adapter);
6538
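	/* clear any wake-up status bits left over from before the suspend */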
Auke Kok9d5c8242008-01-24 02:22:38 -08006539 wr32(E1000_WUS, ~0);
6540
Alexander Duycka88f10e2008-07-08 15:13:38 -07006541 if (netif_running(netdev)) {
6542 err = igb_open(netdev);
6543 if (err)
6544 return err;
6545 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006546
6547 netif_device_attach(netdev);
6548
Auke Kok9d5c8242008-01-24 02:22:38 -08006549 return 0;
6550}
6551#endif
6552
6553static void igb_shutdown(struct pci_dev *pdev)
6554{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006555 bool wake;
6556
6557 __igb_shutdown(pdev, &wake);
6558
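	/* arm wake and drop to D3hot only on a real power-off; on a reboot
	 * the device stays in D0 */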
6559 if (system_state == SYSTEM_POWER_OFF) {
6560 pci_wake_from_d3(pdev, wake);
6561 pci_set_power_state(pdev, PCI_D3hot);
6562 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006563}
6564
6565#ifdef CONFIG_NET_POLL_CONTROLLER
6566/*
6567 * Polling 'interrupt' - used by things like netconsole to send skbs
6568 * without having to re-enable interrupts. It's not called while
6569 * the interrupt routine is executing.
6570 */
6571static void igb_netpoll(struct net_device *netdev)
6572{
6573 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006574 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08006575 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006576
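	/* legacy or MSI mode: a single vector services the whole device */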
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006577 if (!adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00006578 struct igb_q_vector *q_vector = adapter->q_vector[0];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006579 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00006580 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006581 return;
6582 }
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07006583
Alexander Duyck047e0032009-10-27 15:49:27 +00006584 for (i = 0; i < adapter->num_q_vectors; i++) {
6585 struct igb_q_vector *q_vector = adapter->q_vector[i];
6586 wr32(E1000_EIMC, q_vector->eims_value);
6587 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006588 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006589}
6590#endif /* CONFIG_NET_POLL_CONTROLLER */
6591
6592/**
6593 * igb_io_error_detected - called when PCI error is detected
6594 * @pdev: Pointer to PCI device
6595 * @state: The current pci connection state
6596 *
6597 * This function is called after a PCI bus error affecting
6598 * this device has been detected.
6599 */
6600static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6601 pci_channel_state_t state)
6602{
6603 struct net_device *netdev = pci_get_drvdata(pdev);
6604 struct igb_adapter *adapter = netdev_priv(netdev);
6605
6606 netif_device_detach(netdev);
6607
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00006608 if (state == pci_channel_io_perm_failure)
6609 return PCI_ERS_RESULT_DISCONNECT;
6610
Auke Kok9d5c8242008-01-24 02:22:38 -08006611 if (netif_running(netdev))
6612 igb_down(adapter);
6613 pci_disable_device(pdev);
6614
6615 /* Request a slot reset. */
6616 return PCI_ERS_RESULT_NEED_RESET;
6617}
6618
6619/**
6620 * igb_io_slot_reset - called after the pci bus has been reset.
6621 * @pdev: Pointer to PCI device
6622 *
6623 * Restart the card from scratch, as if from a cold boot. Implementation
6624 * resembles the first half of the igb_resume routine.
6625 */
6626static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6627{
6628 struct net_device *netdev = pci_get_drvdata(pdev);
6629 struct igb_adapter *adapter = netdev_priv(netdev);
6630 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08006631 pci_ers_result_t result;
Taku Izumi42bfd33a2008-06-20 12:10:30 +09006632 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006633
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006634 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006635 dev_err(&pdev->dev,
6636 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08006637 result = PCI_ERS_RESULT_DISCONNECT;
6638 } else {
6639 pci_set_master(pdev);
6640 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006641 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08006642
6643 pci_enable_wake(pdev, PCI_D3hot, 0);
6644 pci_enable_wake(pdev, PCI_D3cold, 0);
6645
6646 igb_reset(adapter);
6647 wr32(E1000_WUS, ~0);
6648 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08006649 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006650
Jeff Kirsherea943d42008-12-11 20:34:19 -08006651 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6652 if (err) {
6653 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6654 "failed 0x%0x\n", err);
6655 /* non-fatal, continue */
6656 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006657
Alexander Duyck40a914f2008-11-27 00:24:37 -08006658 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08006659}
6660
6661/**
6662 * igb_io_resume - called when traffic can start flowing again.
6663 * @pdev: Pointer to PCI device
6664 *
6665 * This callback is called when the error recovery driver tells us that
6666 * it's OK to resume normal operation. Implementation resembles the
6667 * second half of the igb_resume routine.
6668 */
6669static void igb_io_resume(struct pci_dev *pdev)
6670{
6671 struct net_device *netdev = pci_get_drvdata(pdev);
6672 struct igb_adapter *adapter = netdev_priv(netdev);
6673
Auke Kok9d5c8242008-01-24 02:22:38 -08006674 if (netif_running(netdev)) {
6675 if (igb_up(adapter)) {
6676 dev_err(&pdev->dev, "igb_up failed after reset\n");
6677 return;
6678 }
6679 }
6680
6681 netif_device_attach(netdev);
6682
6683 /* let the f/w know that the h/w is now under the control of the
6684 * driver. */
6685 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006686}
6687
Alexander Duyck26ad9172009-10-05 06:32:49 +00006688static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6689 u8 qsel)
6690{
6691 u32 rar_low, rar_high;
6692 struct e1000_hw *hw = &adapter->hw;
6693
6694 /* HW expects these in little endian so we reverse the byte order
6695 * from network order (big endian) to little endian
6696 */
6697 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6698 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6699 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6700
6701 /* Indicate to hardware the Address is Valid. */
6702 rar_high |= E1000_RAH_AV;
6703
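	/* the pool-select field differs by MAC: the 82575 takes qsel as a
	 * binary pool number, later devices take a one-bit-per-pool mask,
	 * hence the multiply below versus the shift */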
6704 if (hw->mac.type == e1000_82575)
6705 rar_high |= E1000_RAH_POOL_1 * qsel;
6706 else
6707 rar_high |= E1000_RAH_POOL_1 << qsel;
6708
6709 wr32(E1000_RAL(index), rar_low);
6710 wrfl();
6711 wr32(E1000_RAH(index), rar_high);
6712 wrfl();
6713}
6714
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006715static int igb_set_vf_mac(struct igb_adapter *adapter,
6716 int vf, unsigned char *mac_addr)
6717{
6718 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006719 /* VF MAC addresses start at the end of the receive addresses and move
6720 * towards the first; as a result a collision should not be possible */
6721 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006722
Alexander Duyck37680112009-02-19 20:40:30 -08006723 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006724
Alexander Duyck26ad9172009-10-05 06:32:49 +00006725 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006726
6727 return 0;
6728}
6729
Williams, Mitch A8151d292010-02-10 01:44:24 +00006730static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6731{
6732 struct igb_adapter *adapter = netdev_priv(netdev);
6733 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6734 return -EINVAL;
6735 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6736 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6737 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6738 " change effective.");
6739 if (test_bit(__IGB_DOWN, &adapter->state)) {
6740 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6741 " but the PF device is not up.\n");
6742 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6743 " attempting to use the VF device.\n");
6744 }
6745 return igb_set_vf_mac(adapter, vf, mac);
6746}
6747
Lior Levy17dc5662011-02-08 02:28:46 +00006748static int igb_link_mbps(int internal_link_speed)
6749{
6750 switch (internal_link_speed) {
6751 case SPEED_100:
6752 return 100;
6753 case SPEED_1000:
6754 return 1000;
6755 default:
6756 return 0;
6757 }
6758}
6759
6760static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6761 int link_speed)
6762{
6763 int rf_dec, rf_int;
6764 u32 bcnrc_val;
6765
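	/* The rate factor programmed into RTTBCNRC is a fixed-point value
	 * equal to link_speed / tx_rate: the integer part goes in rf_int and
	 * the fraction in rf_dec.  Worked example, assuming the 14-bit
	 * fractional shift of E1000_RTTBCNRC_RF_INT_SHIFT: a 1000 Mbps link
	 * capped at 300 Mbps gives rf_int = 3 and
	 * rf_dec = ((1000 - 3 * 300) << 14) / 300 = 5461, i.e. ~3.333. */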
6766 if (tx_rate != 0) {
6767 /* Calculate the rate factor values to set */
6768 rf_int = link_speed / tx_rate;
6769 rf_dec = (link_speed - (rf_int * tx_rate));
6770 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
6771
6772 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6773 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6774 E1000_RTTBCNRC_RF_INT_MASK);
6775 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6776 } else {
6777 bcnrc_val = 0;
6778 }
6779
6780 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6781 wr32(E1000_RTTBCNRC, bcnrc_val);
6782}
6783
6784static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6785{
6786 int actual_link_speed, i;
6787 bool reset_rate = false;
6788
6789 /* VF TX rate limit was not set or not supported */
6790 if ((adapter->vf_rate_link_speed == 0) ||
6791 (adapter->hw.mac.type != e1000_82576))
6792 return;
6793
6794 actual_link_speed = igb_link_mbps(adapter->link_speed);
6795 if (actual_link_speed != adapter->vf_rate_link_speed) {
6796 reset_rate = true;
6797 adapter->vf_rate_link_speed = 0;
6798 dev_info(&adapter->pdev->dev,
6799 "Link speed has been changed. VF Transmit "
6800 "rate is disabled\n");
6801 }
6802
6803 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6804 if (reset_rate)
6805 adapter->vf_data[i].tx_rate = 0;
6806
6807 igb_set_vf_rate_limit(&adapter->hw, i,
6808 adapter->vf_data[i].tx_rate,
6809 actual_link_speed);
6810 }
6811}
6812
Williams, Mitch A8151d292010-02-10 01:44:24 +00006813static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6814{
Lior Levy17dc5662011-02-08 02:28:46 +00006815 struct igb_adapter *adapter = netdev_priv(netdev);
6816 struct e1000_hw *hw = &adapter->hw;
6817 int actual_link_speed;
6818
6819 if (hw->mac.type != e1000_82576)
6820 return -EOPNOTSUPP;
6821
6822 actual_link_speed = igb_link_mbps(adapter->link_speed);
6823 if ((vf >= adapter->vfs_allocated_count) ||
6824 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6825 (tx_rate < 0) || (tx_rate > actual_link_speed))
6826 return -EINVAL;
6827
6828 adapter->vf_rate_link_speed = actual_link_speed;
6829 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6830 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6831
6832 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00006833}
6834
6835static int igb_ndo_get_vf_config(struct net_device *netdev,
6836 int vf, struct ifla_vf_info *ivi)
6837{
6838 struct igb_adapter *adapter = netdev_priv(netdev);
6839 if (vf >= adapter->vfs_allocated_count)
6840 return -EINVAL;
6841 ivi->vf = vf;
6842 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00006843 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00006844 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6845 ivi->qos = adapter->vf_data[vf].pf_qos;
6846 return 0;
6847}
6848
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006849static void igb_vmm_control(struct igb_adapter *adapter)
6850{
6851 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00006852 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006853
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006854 switch (hw->mac.type) {
6855 case e1000_82575:
6856 default:
6857 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006858 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006859 case e1000_82576:
6860 /* notify HW that the MAC is adding vlan tags */
6861 reg = rd32(E1000_DTXCTL);
6862 reg |= E1000_DTXCTL_VLAN_ADDED;
6863 wr32(E1000_DTXCTL, reg);
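		/* fall through - the 82576 also needs the RPLOLR setting below */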
6864 case e1000_82580:
6865 /* enable replication vlan tag stripping */
6866 reg = rd32(E1000_RPLOLR);
6867 reg |= E1000_RPLOLR_STRVLAN;
6868 wr32(E1000_RPLOLR, reg);
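		/* fall through */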
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00006869 case e1000_i350:
6870 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006871 break;
6872 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00006873
Alexander Duyckd4960302009-10-27 15:53:45 +00006874 if (adapter->vfs_allocated_count) {
6875 igb_vmdq_set_loopback_pf(hw, true);
6876 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00006877 igb_vmdq_set_anti_spoofing_pf(hw, true,
6878 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00006879 } else {
6880 igb_vmdq_set_loopback_pf(hw, false);
6881 igb_vmdq_set_replication_pf(hw, false);
6882 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006883}
6884
Auke Kok9d5c8242008-01-24 02:22:38 -08006885/* igb_main.c */