/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 0
#define BUILD 6
#define KFIX 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k" __stringify(KFIX)
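/*
 * For reference: __stringify() expands each macro argument before quoting
 * it, so with the values above DRV_VERSION becomes the literal string
 * "3.0.6-k2".
 */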
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
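/*
 * A note on the table above: PCI_VDEVICE(INTEL, id) fills in the Intel
 * vendor ID and the given device ID and wildcards the subsystem IDs with
 * PCI_ANY_ID, so one entry matches every board built around that device.
 */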

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = igb_suspend,
	.resume = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
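/*
 * Illustrative output for one of the queue-indexed cases above (the
 * values are made up; the four queue instances print side by side):
 *
 *   RDLEN[0-3]      00001000 00001000 00001000 00001000
 */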

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	int n = 0;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_buffer *buffer_info;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		netdev->name,
		netdev->state,
		netdev->trans_start,
		netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
			   (u64)buffer_info->dma,
			   buffer_info->length,
			   buffer_info->next_to_watch,
			   (u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15     0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			   rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						rx_ring->rx_buffer_len, true);
					if (rx_ring->rx_buffer_len
						< IGB_RXBUFFER_1024)
						print_hex_dump(KERN_INFO, "",
							DUMP_PREFIX_ADDRESS,
							16, 1,
							phys_to_virt(
								buffer_info->page_dma +
								buffer_info->page_offset),
							PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}
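/*
 * Usage note: igb_dump() is gated on the adapter's message level.
 * NETIF_MSG_HW enables the register dump and ring summaries,
 * NETIF_MSG_TX_DONE and NETIF_MSG_RX_STATUS add the full descriptor
 * dumps, and NETIF_MSG_PKTDATA appends hex dumps of the buffers
 * (typically set with something like "ethtool -s eth0 msglvl <mask>").
 */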

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
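/*
 * A worked example of the assembly above, assuming the usual
 * IGB_82580_TSYNC_SHIFT of 24: on the 82580 the returned count is
 * (SYSTIMH << 56) | (SYSTIML << 24) | (SYSTIMR >> 8), so the usable
 * bits of the sub-nanosecond SYSTIMR register sit below SYSTIML.
 */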
570
Auke Kok9d5c8242008-01-24 02:22:38 -0800571/**
Alexander Duyckc0410762010-03-25 13:10:08 +0000572 * igb_get_hw_dev - return device
Auke Kok9d5c8242008-01-24 02:22:38 -0800573 * used by hardware layer to print debugging information
574 **/
Alexander Duyckc0410762010-03-25 13:10:08 +0000575struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
Auke Kok9d5c8242008-01-24 02:22:38 -0800576{
577 struct igb_adapter *adapter = hw->back;
Alexander Duyckc0410762010-03-25 13:10:08 +0000578 return adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -0800579}
Patrick Ohly38c845c2009-02-12 05:03:41 +0000580
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}
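/*
 * For reference, Q_IDX_82576(i) interleaves the paired queues: it maps
 * i = 0, 1, 2, 3, ... onto register indices 0, 8, 1, 9, ..., matching
 * the VF layout described above in which VF n owns queues n and n + 8.
 */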

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/* 82580 uses the same table-based approach as 82576 but has
		   fewer entries; as a result we carry over for queues greater
		   than 4. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
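/*
 * Example of the 82576 IVAR layout handled above: rx queue 0 lands in
 * byte 0 of IVAR0[0], tx queue 0 in byte 1, rx queue 8 in byte 2 and
 * tx queue 8 in byte 3, each byte carrying the vector number plus
 * E1000_IVAR_VALID.
 */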

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
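/*
 * With the naming scheme above, a hypothetical two-queue "eth0" running
 * in paired mode would appear in /proc/interrupts as eth0-TxRx-0 and
 * eth0-TxRx-1, plus one entry named plain eth0 for the "other" (link
 * state) vector requested first.
 */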

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}
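/*
 * Example vector budget from the logic above: with rss_queues = 4 and
 * IGB_FLAG_QUEUE_PAIRS clear, numvecs = 4 (rx) + 4 (tx) + 1 (link) = 9
 * MSI-X vectors; with queue pairs enabled it drops to 4 + 1 = 5.
 */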

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
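/*
 * Mapping example: with 4 rx and 4 tx queues but only 4 q_vectors, the
 * else-branch above pairs tx[i] and rx[i] on vector i; with 8 or more
 * q_vectors every ring gets a vector of its own.
 */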

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

1285 * igb_irq_disable - Mask off interrupt generation on the NIC
1286 * @adapter: board private structure
1287 **/
1288static void igb_irq_disable(struct igb_adapter *adapter)
1289{
1290 struct e1000_hw *hw = &adapter->hw;
1291
Alexander Duyck25568a52009-10-27 23:49:59 +00001292 /*
1293 * we need to be careful when disabling interrupts. The VFs are also
1294 * mapped into these registers and so clearing the bits can cause
1295 * issues on the VF drivers so we only need to clear what we set
1296 */
Auke Kok9d5c8242008-01-24 02:22:38 -08001297 if (adapter->msix_entries) {
Alexander Duyck2dfd1212009-09-03 14:49:15 +00001298 u32 regval = rd32(E1000_EIAM);
1299 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1300 wr32(E1000_EIMC, adapter->eims_enable_mask);
1301 regval = rd32(E1000_EIAC);
1302 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
Auke Kok9d5c8242008-01-24 02:22:38 -08001303 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001304
1305 wr32(E1000_IAM, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001306 wr32(E1000_IMC, ~0);
1307 wrfl();
Emil Tantilov81a61852010-08-02 14:40:52 +00001308 if (adapter->msix_entries) {
1309 int i;
1310 for (i = 0; i < adapter->num_q_vectors; i++)
1311 synchronize_irq(adapter->msix_entries[i].vector);
1312 } else {
1313 synchronize_irq(adapter->pdev->irq);
1314 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001315}
1316
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

1348static void igb_update_mng_vlan(struct igb_adapter *adapter)
1349{
Alexander Duyck51466232009-10-27 23:47:35 +00001350 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001351 u16 vid = adapter->hw.mng_cookie.vlan_id;
1352 u16 old_vid = adapter->mng_vlan_id;
Auke Kok9d5c8242008-01-24 02:22:38 -08001353
Alexander Duyck51466232009-10-27 23:47:35 +00001354 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1355 /* add VID to filter table */
1356 igb_vfta_set(hw, vid, true);
1357 adapter->mng_vlan_id = vid;
1358 } else {
1359 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1360 }
1361
1362 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1363 (vid != old_vid) &&
1364 !vlan_group_get_device(adapter->vlgrp, old_vid)) {
1365 /* remove VID from filter table */
1366 igb_vfta_set(hw, old_vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08001367 }
1368}
1369
1370/**
1371 * igb_release_hw_control - release control of the h/w to f/w
1372 * @adapter: address of board private structure
1373 *
1374 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1375 * For ASF and Pass Through versions of f/w this means that the
1376 * driver is no longer loaded.
1377 *
1378 **/
1379static void igb_release_hw_control(struct igb_adapter *adapter)
1380{
1381 struct e1000_hw *hw = &adapter->hw;
1382 u32 ctrl_ext;
1383
1384 /* Let firmware take over control of h/w */
1385 ctrl_ext = rd32(E1000_CTRL_EXT);
1386 wr32(E1000_CTRL_EXT,
1387 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1388}
1389
Auke Kok9d5c8242008-01-24 02:22:38 -08001390/**
1391 * igb_get_hw_control - get control of the h/w from f/w
1392 * @adapter: address of board private structure
1393 *
1394 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1395 * For ASF and Pass Through versions of f/w this means that
1396 * the driver is loaded.
1397 *
1398 **/
1399static void igb_get_hw_control(struct igb_adapter *adapter)
1400{
1401 struct e1000_hw *hw = &adapter->hw;
1402 u32 ctrl_ext;
1403
1404 /* Let firmware know the driver has taken over */
1405 ctrl_ext = rd32(E1000_CTRL_EXT);
1406 wr32(E1000_CTRL_EXT,
1407 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1408}
1409
Auke Kok9d5c8242008-01-24 02:22:38 -08001410/**
1411 * igb_configure - configure the hardware for RX and TX
1412 * @adapter: private board structure
1413 **/
1414static void igb_configure(struct igb_adapter *adapter)
1415{
1416 struct net_device *netdev = adapter->netdev;
1417 int i;
1418
1419 igb_get_hw_control(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001420 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001421
1422 igb_restore_vlan(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001423
Alexander Duyck85b430b2009-10-27 15:50:29 +00001424 igb_setup_tctl(adapter);
Alexander Duyck06cf2662009-10-27 15:53:25 +00001425 igb_setup_mrqc(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001426 igb_setup_rctl(adapter);
Alexander Duyck85b430b2009-10-27 15:50:29 +00001427
1428 igb_configure_tx(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001429 igb_configure_rx(adapter);
Alexander Duyck662d7202008-06-27 11:00:29 -07001430
1431 igb_rx_fifo_flush_82575(&adapter->hw);
1432
Alexander Duyckc493ea42009-03-20 00:16:50 +00001433 /* call igb_desc_unused which always leaves
Auke Kok9d5c8242008-01-24 02:22:38 -08001434 * at least 1 descriptor unused to make sure
1435 * next_to_use != next_to_clean */
1436 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00001437 struct igb_ring *ring = adapter->rx_ring[i];
Alexander Duyckc493ea42009-03-20 00:16:50 +00001438 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
Auke Kok9d5c8242008-01-24 02:22:38 -08001439 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001440}
1441
Nick Nunley88a268c2010-02-17 01:01:59 +00001442/**
1443 * igb_power_up_link - Power up the phy/serdes link
1444 * @adapter: address of board private structure
1445 **/
1446void igb_power_up_link(struct igb_adapter *adapter)
1447{
1448 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1449 igb_power_up_phy_copper(&adapter->hw);
1450 else
1451 igb_power_up_serdes_link_82575(&adapter->hw);
1452}
1453
1454/**
1455 * igb_power_down_link - Power down the phy/serdes link
1456 * @adapter: address of board private structure
 1457 **/
1458static void igb_power_down_link(struct igb_adapter *adapter)
1459{
1460 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1461 igb_power_down_phy_copper_82575(&adapter->hw);
1462 else
1463 igb_shutdown_serdes_link_82575(&adapter->hw);
1464}
Auke Kok9d5c8242008-01-24 02:22:38 -08001465
1466/**
1467 * igb_up - Open the interface and prepare it to handle traffic
1468 * @adapter: board private structure
1469 **/
Auke Kok9d5c8242008-01-24 02:22:38 -08001470int igb_up(struct igb_adapter *adapter)
1471{
1472 struct e1000_hw *hw = &adapter->hw;
1473 int i;
1474
1475 /* hardware has been reset, we need to reload some things */
1476 igb_configure(adapter);
1477
1478 clear_bit(__IGB_DOWN, &adapter->state);
1479
Alexander Duyck047e0032009-10-27 15:49:27 +00001480 for (i = 0; i < adapter->num_q_vectors; i++) {
1481 struct igb_q_vector *q_vector = adapter->q_vector[i];
1482 napi_enable(&q_vector->napi);
1483 }
PJ Waskiewicz844290e2008-06-27 11:00:39 -07001484 if (adapter->msix_entries)
Auke Kok9d5c8242008-01-24 02:22:38 -08001485 igb_configure_msix(adapter);
Alexander Duyckfeeb2722010-02-03 21:59:51 +00001486 else
1487 igb_assign_vector(adapter->q_vector[0], 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08001488
1489 /* Clear any pending interrupts. */
1490 rd32(E1000_ICR);
1491 igb_irq_enable(adapter);
1492
Alexander Duyckd4960302009-10-27 15:53:45 +00001493 /* notify VFs that reset has been completed */
1494 if (adapter->vfs_allocated_count) {
1495 u32 reg_data = rd32(E1000_CTRL_EXT);
1496 reg_data |= E1000_CTRL_EXT_PFRSTD;
1497 wr32(E1000_CTRL_EXT, reg_data);
1498 }
1499
Jesse Brandeburg4cb9be72009-04-21 18:42:05 +00001500 netif_tx_start_all_queues(adapter->netdev);
1501
Alexander Duyck25568a52009-10-27 23:49:59 +00001502 /* start the watchdog. */
1503 hw->mac.get_link_status = 1;
1504 schedule_work(&adapter->watchdog_task);
1505
Auke Kok9d5c8242008-01-24 02:22:38 -08001506 return 0;
1507}
1508
1509void igb_down(struct igb_adapter *adapter)
1510{
Auke Kok9d5c8242008-01-24 02:22:38 -08001511 struct net_device *netdev = adapter->netdev;
Alexander Duyck330a6d62009-10-27 23:51:35 +00001512 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08001513 u32 tctl, rctl;
1514 int i;
1515
1516 /* signal that we're down so the interrupt handler does not
1517 * reschedule our watchdog timer */
1518 set_bit(__IGB_DOWN, &adapter->state);
1519
1520 /* disable receives in the hardware */
1521 rctl = rd32(E1000_RCTL);
1522 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1523 /* flush and sleep below */
1524
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001525 netif_tx_stop_all_queues(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001526
1527 /* disable transmits in the hardware */
1528 tctl = rd32(E1000_TCTL);
1529 tctl &= ~E1000_TCTL_EN;
1530 wr32(E1000_TCTL, tctl);
1531 /* flush both disables and wait for them to finish */
1532 wrfl();
1533 msleep(10);
1534
Alexander Duyck047e0032009-10-27 15:49:27 +00001535 for (i = 0; i < adapter->num_q_vectors; i++) {
1536 struct igb_q_vector *q_vector = adapter->q_vector[i];
1537 napi_disable(&q_vector->napi);
1538 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001539
Auke Kok9d5c8242008-01-24 02:22:38 -08001540 igb_irq_disable(adapter);
1541
1542 del_timer_sync(&adapter->watchdog_timer);
1543 del_timer_sync(&adapter->phy_info_timer);
1544
Auke Kok9d5c8242008-01-24 02:22:38 -08001545 netif_carrier_off(netdev);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001546
 1547	/* record the stats before reset */
Eric Dumazet12dcd862010-10-15 17:27:10 +00001548 spin_lock(&adapter->stats64_lock);
1549 igb_update_stats(adapter, &adapter->stats64);
1550 spin_unlock(&adapter->stats64_lock);
Alexander Duyck04fe6352009-02-06 23:22:32 +00001551
Auke Kok9d5c8242008-01-24 02:22:38 -08001552 adapter->link_speed = 0;
1553 adapter->link_duplex = 0;
1554
Jeff Kirsher30236822008-06-24 17:01:15 -07001555 if (!pci_channel_offline(adapter->pdev))
1556 igb_reset(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001557 igb_clean_all_tx_rings(adapter);
1558 igb_clean_all_rx_rings(adapter);
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00001559#ifdef CONFIG_IGB_DCA
1560
1561 /* since we reset the hardware DCA settings were cleared */
1562 igb_setup_dca(adapter);
1563#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08001564}
1565
1566void igb_reinit_locked(struct igb_adapter *adapter)
1567{
1568 WARN_ON(in_interrupt());
1569 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1570 msleep(1);
1571 igb_down(adapter);
1572 igb_up(adapter);
1573 clear_bit(__IGB_RESETTING, &adapter->state);
1574}
1575
1576void igb_reset(struct igb_adapter *adapter)
1577{
Alexander Duyck090b1792009-10-27 23:51:55 +00001578 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08001579 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001580 struct e1000_mac_info *mac = &hw->mac;
1581 struct e1000_fc_info *fc = &hw->fc;
Auke Kok9d5c8242008-01-24 02:22:38 -08001582 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1583 u16 hwm;
1584
1585 /* Repartition Pba for greater than 9k mtu
1586 * To take effect CTRL.RST is required.
1587 */
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001588 switch (mac->type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00001589 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00001590 case e1000_82580:
1591 pba = rd32(E1000_RXPBS);
1592 pba = igb_rxpbs_adjust_82580(pba);
1593 break;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001594 case e1000_82576:
Alexander Duyckd249be52009-10-27 23:46:38 +00001595 pba = rd32(E1000_RXPBS);
1596 pba &= E1000_RXPBS_SIZE_MASK_82576;
Alexander Duyckfa4dfae2009-02-06 23:21:31 +00001597 break;
1598 case e1000_82575:
1599 default:
1600 pba = E1000_PBA_34K;
1601 break;
Alexander Duyck2d064c02008-07-08 15:10:12 -07001602 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001603
Alexander Duyck2d064c02008-07-08 15:10:12 -07001604 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1605 (mac->type < e1000_82576)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001606 /* adjust PBA for jumbo frames */
1607 wr32(E1000_PBA, pba);
1608
1609 /* To maintain wire speed transmits, the Tx FIFO should be
1610 * large enough to accommodate two full transmit packets,
1611 * rounded up to the next 1KB and expressed in KB. Likewise,
1612 * the Rx FIFO should be large enough to accommodate at least
1613 * one full receive packet and is similarly rounded up and
1614 * expressed in KB. */
1615 pba = rd32(E1000_PBA);
1616 /* upper 16 bits has Tx packet buffer allocation size in KB */
1617 tx_space = pba >> 16;
1618 /* lower 16 bits has Rx packet buffer allocation size in KB */
1619 pba &= 0xffff;
 1620		/* the Tx FIFO also stores 16 bytes of info about each packet,
 1621		 * but doesn't include the Ethernet FCS because hardware appends it */
1622 min_tx_space = (adapter->max_frame_size +
Alexander Duyck85e8d002009-02-16 00:00:20 -08001623 sizeof(union e1000_adv_tx_desc) -
Auke Kok9d5c8242008-01-24 02:22:38 -08001624 ETH_FCS_LEN) * 2;
1625 min_tx_space = ALIGN(min_tx_space, 1024);
1626 min_tx_space >>= 10;
1627 /* software strips receive CRC, so leave room for it */
1628 min_rx_space = adapter->max_frame_size;
1629 min_rx_space = ALIGN(min_rx_space, 1024);
1630 min_rx_space >>= 10;
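		/*
		 * Illustrative arithmetic (assuming a 9000-byte jumbo MTU, so
		 * max_frame_size = 9018): min_tx_space =
		 * ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB, and
		 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB.
		 */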
1631
1632 /* If current Tx allocation is less than the min Tx FIFO size,
1633 * and the min Tx FIFO size is less than the current Rx FIFO
1634 * allocation, take space away from current Rx allocation */
1635 if (tx_space < min_tx_space &&
1636 ((min_tx_space - tx_space) < pba)) {
1637 pba = pba - (min_tx_space - tx_space);
1638
1639 /* if short on rx space, rx wins and must trump tx
1640 * adjustment */
1641 if (pba < min_rx_space)
1642 pba = min_rx_space;
1643 }
Alexander Duyck2d064c02008-07-08 15:10:12 -07001644 wr32(E1000_PBA, pba);
Auke Kok9d5c8242008-01-24 02:22:38 -08001645 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001646
1647 /* flow control settings */
1648 /* The high water mark must be low enough to fit one full frame
1649 * (or the size used for early receive) above it in the Rx FIFO.
1650 * Set it to the lower of:
1651 * - 90% of the Rx FIFO size, or
1652 * - the full Rx FIFO size minus one full frame */
1653 hwm = min(((pba << 10) * 9 / 10),
Alexander Duyck2d064c02008-07-08 15:10:12 -07001654 ((pba << 10) - 2 * adapter->max_frame_size));
Auke Kok9d5c8242008-01-24 02:22:38 -08001655
Alexander Duyckd405ea32009-12-23 13:21:27 +00001656 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1657 fc->low_water = fc->high_water - 16;
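	/*
	 * Worked example (illustrative, assuming the 82575 default 34 KB PBA
	 * and a 1518-byte max frame): hwm = min(34816 * 9 / 10,
	 * 34816 - 2 * 1518) = min(31334, 31780) = 31334, so
	 * fc->high_water = 31328 and fc->low_water = 31312 bytes.
	 */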
Auke Kok9d5c8242008-01-24 02:22:38 -08001658 fc->pause_time = 0xFFFF;
1659 fc->send_xon = 1;
Alexander Duyck0cce1192009-07-23 18:10:24 +00001660 fc->current_mode = fc->requested_mode;
Auke Kok9d5c8242008-01-24 02:22:38 -08001661
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001662 /* disable receive for all VFs and wait one second */
1663 if (adapter->vfs_allocated_count) {
1664 int i;
1665 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
Greg Rose8fa7e0f2010-11-06 05:43:21 +00001666 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001667
1668 /* ping all the active vfs to let them know we are going down */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00001669 igb_ping_all_vfs(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08001670
1671 /* disable transmits and receives */
1672 wr32(E1000_VFRE, 0);
1673 wr32(E1000_VFTE, 0);
1674 }
1675
Auke Kok9d5c8242008-01-24 02:22:38 -08001676 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00001677 hw->mac.ops.reset_hw(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001678 wr32(E1000_WUC, 0);
1679
Alexander Duyck330a6d62009-10-27 23:51:35 +00001680 if (hw->mac.ops.init_hw(hw))
Alexander Duyck090b1792009-10-27 23:51:55 +00001681 dev_err(&pdev->dev, "Hardware Error\n");
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08001682 if (hw->mac.type > e1000_82580) {
1683 if (adapter->flags & IGB_FLAG_DMAC) {
1684 u32 reg;
Auke Kok9d5c8242008-01-24 02:22:38 -08001685
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08001686 /*
1687 * DMA Coalescing high water mark needs to be higher
 1688			 * than the Rx threshold. The Rx threshold is
 1689			 * currently pba - 6, so we should use a high water
 1690			 * mark of pba - 4. */
1691 hwm = (pba - 4) << 10;
1692
1693 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1694 & E1000_DMACR_DMACTHR_MASK);
1695
 1696			/* transition to L0x or L1 if available */
1697 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1698
 1699			/* watchdog timer = ~1000 usec ((1000 >> 5) = 31 units of 32 usec) */
1700 reg |= (1000 >> 5);
1701 wr32(E1000_DMACR, reg);
1702
 1703			/* no lower threshold to disable coalescing (smart fifo)
 1704			 * - UTRESH=0 */
1705 wr32(E1000_DMCRTRH, 0);
1706
 1707			/* write the (pba - 4) KB coalescing high water mark computed above */
1708 wr32(E1000_FCRTC, hwm);
1709
1710 /*
 1711			 * This sets the time to wait before requesting transition
 1712			 * to low power state to the number of usecs needed to
 1713			 * receive one 512 byte frame at gigabit line rate
1714 */
1715 reg = rd32(E1000_DMCTLX);
1716 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1717
1718 /* Delay 255 usec before entering Lx state. */
1719 reg |= 0xFF;
1720 wr32(E1000_DMCTLX, reg);
1721
1722 /* free space in Tx packet buffer to wake from DMAC */
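			/*
			 * Illustrative: assuming IGB_MIN_TXPBSIZE = 20408
			 * bytes and a 1518-byte max frame, this programs
			 * (20408 - (4096 + 1518)) >> 6 = 231 units of
			 * 64 bytes.
			 */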
1723 wr32(E1000_DMCTXTH,
1724 (IGB_MIN_TXPBSIZE -
1725 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1726 >> 6);
1727
1728 /* make low power state decision controlled by DMAC */
1729 reg = rd32(E1000_PCIEMISC);
1730 reg |= E1000_PCIEMISC_LX_DECISION;
1731 wr32(E1000_PCIEMISC, reg);
1732 } /* end if IGB_FLAG_DMAC set */
1733 }
Alexander Duyck55cac242009-11-19 12:42:21 +00001734 if (hw->mac.type == e1000_82580) {
1735 u32 reg = rd32(E1000_PCIEMISC);
1736 wr32(E1000_PCIEMISC,
1737 reg & ~E1000_PCIEMISC_LX_DECISION);
1738 }
Nick Nunley88a268c2010-02-17 01:01:59 +00001739 if (!netif_running(adapter->netdev))
1740 igb_power_down_link(adapter);
1741
Auke Kok9d5c8242008-01-24 02:22:38 -08001742 igb_update_mng_vlan(adapter);
1743
1744 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1745 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1746
Alexander Duyck330a6d62009-10-27 23:51:35 +00001747 igb_get_phy_info(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001748}
1749
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001750static const struct net_device_ops igb_netdev_ops = {
Alexander Duyck559e9c42009-10-27 23:52:50 +00001751 .ndo_open = igb_open,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001752 .ndo_stop = igb_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08001753 .ndo_start_xmit = igb_xmit_frame_adv,
Eric Dumazet12dcd862010-10-15 17:27:10 +00001754 .ndo_get_stats64 = igb_get_stats64,
Alexander Duyckff41f8d2009-09-03 14:48:56 +00001755 .ndo_set_rx_mode = igb_set_rx_mode,
1756 .ndo_set_multicast_list = igb_set_rx_mode,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001757 .ndo_set_mac_address = igb_set_mac,
1758 .ndo_change_mtu = igb_change_mtu,
1759 .ndo_do_ioctl = igb_ioctl,
1760 .ndo_tx_timeout = igb_tx_timeout,
1761 .ndo_validate_addr = eth_validate_addr,
1762 .ndo_vlan_rx_register = igb_vlan_rx_register,
1763 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1764 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
Williams, Mitch A8151d292010-02-10 01:44:24 +00001765 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1766 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1767 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1768 .ndo_get_vf_config = igb_ndo_get_vf_config,
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001769#ifdef CONFIG_NET_POLL_CONTROLLER
1770 .ndo_poll_controller = igb_netpoll,
1771#endif
1772};
1773
Taku Izumi42bfd332008-06-20 12:10:30 +09001774/**
Auke Kok9d5c8242008-01-24 02:22:38 -08001775 * igb_probe - Device Initialization Routine
1776 * @pdev: PCI device information struct
1777 * @ent: entry in igb_pci_tbl
1778 *
1779 * Returns 0 on success, negative on failure
1780 *
1781 * igb_probe initializes an adapter identified by a pci_dev structure.
1782 * The OS initialization, configuring of the adapter private structure,
1783 * and a hardware reset occur.
1784 **/
1785static int __devinit igb_probe(struct pci_dev *pdev,
1786 const struct pci_device_id *ent)
1787{
1788 struct net_device *netdev;
1789 struct igb_adapter *adapter;
1790 struct e1000_hw *hw;
Alexander Duyck4337e992009-10-27 23:48:31 +00001791 u16 eeprom_data = 0;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001792 s32 ret_val;
Alexander Duyck4337e992009-10-27 23:48:31 +00001793 static int global_quad_port_a; /* global quad port a indication */
Auke Kok9d5c8242008-01-24 02:22:38 -08001794 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1795 unsigned long mmio_start, mmio_len;
David S. Miller2d6a5e92009-03-17 15:01:30 -07001796 int err, pci_using_dac;
Auke Kok9d5c8242008-01-24 02:22:38 -08001797 u16 eeprom_apme_mask = IGB_EEPROM_APME;
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00001798 u8 part_str[E1000_PBANUM_LENGTH];
Auke Kok9d5c8242008-01-24 02:22:38 -08001799
Andy Gospodarekbded64a2010-07-21 06:40:31 +00001800 /* Catch broken hardware that put the wrong VF device ID in
1801 * the PCIe SR-IOV capability.
1802 */
1803 if (pdev->is_virtfn) {
1804 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1805 pci_name(pdev), pdev->vendor, pdev->device);
1806 return -EINVAL;
1807 }
1808
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001809 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001810 if (err)
1811 return err;
1812
1813 pci_using_dac = 0;
Alexander Duyck59d71982010-04-27 13:09:25 +00001814 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001815 if (!err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001816 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Auke Kok9d5c8242008-01-24 02:22:38 -08001817 if (!err)
1818 pci_using_dac = 1;
1819 } else {
Alexander Duyck59d71982010-04-27 13:09:25 +00001820 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001821 if (err) {
Alexander Duyck59d71982010-04-27 13:09:25 +00001822 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9d5c8242008-01-24 02:22:38 -08001823 if (err) {
1824 dev_err(&pdev->dev, "No usable DMA "
1825 "configuration, aborting\n");
1826 goto err_dma;
1827 }
1828 }
1829 }
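	/*
	 * Summary of the fallback above: prefer 64-bit DMA for both streaming
	 * and coherent mappings; otherwise fall back to a 32-bit mask and
	 * leave pci_using_dac at 0 so NETIF_F_HIGHDMA is not advertised.
	 */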
1830
Alexander Duyckaed5dec2009-02-06 23:16:04 +00001831 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1832 IORESOURCE_MEM),
1833 igb_driver_name);
Auke Kok9d5c8242008-01-24 02:22:38 -08001834 if (err)
1835 goto err_pci_reg;
1836
Frans Pop19d5afd2009-10-02 10:04:12 -07001837 pci_enable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08001838
Auke Kok9d5c8242008-01-24 02:22:38 -08001839 pci_set_master(pdev);
Auke Kokc682fc22008-04-23 11:09:34 -07001840 pci_save_state(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001841
1842 err = -ENOMEM;
Alexander Duyck1bfaf072009-02-19 20:39:23 -08001843 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1844 IGB_ABS_MAX_TX_QUEUES);
Auke Kok9d5c8242008-01-24 02:22:38 -08001845 if (!netdev)
1846 goto err_alloc_etherdev;
1847
1848 SET_NETDEV_DEV(netdev, &pdev->dev);
1849
1850 pci_set_drvdata(pdev, netdev);
1851 adapter = netdev_priv(netdev);
1852 adapter->netdev = netdev;
1853 adapter->pdev = pdev;
1854 hw = &adapter->hw;
1855 hw->back = adapter;
1856 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1857
1858 mmio_start = pci_resource_start(pdev, 0);
1859 mmio_len = pci_resource_len(pdev, 0);
1860
1861 err = -EIO;
Alexander Duyck28b07592009-02-06 23:20:31 +00001862 hw->hw_addr = ioremap(mmio_start, mmio_len);
1863 if (!hw->hw_addr)
Auke Kok9d5c8242008-01-24 02:22:38 -08001864 goto err_ioremap;
1865
Stephen Hemminger2e5c6922008-11-19 22:20:44 -08001866 netdev->netdev_ops = &igb_netdev_ops;
Auke Kok9d5c8242008-01-24 02:22:38 -08001867 igb_set_ethtool_ops(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08001868 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9d5c8242008-01-24 02:22:38 -08001869
1870 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1871
1872 netdev->mem_start = mmio_start;
1873 netdev->mem_end = mmio_start + mmio_len;
1874
Auke Kok9d5c8242008-01-24 02:22:38 -08001875 /* PCI config space info */
1876 hw->vendor_id = pdev->vendor;
1877 hw->device_id = pdev->device;
1878 hw->revision_id = pdev->revision;
1879 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1880 hw->subsystem_device_id = pdev->subsystem_device;
1881
Auke Kok9d5c8242008-01-24 02:22:38 -08001882 /* Copy the default MAC, PHY and NVM function pointers */
1883 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1884 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1885 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1886 /* Initialize skew-specific constants */
1887 err = ei->get_invariants(hw);
1888 if (err)
Alexander Duyck450c87c2009-02-06 23:22:11 +00001889 goto err_sw_init;
Auke Kok9d5c8242008-01-24 02:22:38 -08001890
Alexander Duyck450c87c2009-02-06 23:22:11 +00001891 /* setup the private structure */
Auke Kok9d5c8242008-01-24 02:22:38 -08001892 err = igb_sw_init(adapter);
1893 if (err)
1894 goto err_sw_init;
1895
1896 igb_get_bus_info_pcie(hw);
1897
1898 hw->phy.autoneg_wait_to_complete = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08001899
1900 /* Copper options */
1901 if (hw->phy.media_type == e1000_media_type_copper) {
1902 hw->phy.mdix = AUTO_ALL_MODES;
1903 hw->phy.disable_polarity_correction = false;
1904 hw->phy.ms_type = e1000_ms_hw_default;
1905 }
1906
1907 if (igb_check_reset_block(hw))
1908 dev_info(&pdev->dev,
1909 "PHY reset is blocked due to SOL/IDER session.\n");
1910
1911 netdev->features = NETIF_F_SG |
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001912 NETIF_F_IP_CSUM |
Auke Kok9d5c8242008-01-24 02:22:38 -08001913 NETIF_F_HW_VLAN_TX |
1914 NETIF_F_HW_VLAN_RX |
1915 NETIF_F_HW_VLAN_FILTER;
1916
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001917 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08001918 netdev->features |= NETIF_F_TSO;
Auke Kok9d5c8242008-01-24 02:22:38 -08001919 netdev->features |= NETIF_F_TSO6;
Herbert Xu5c0999b2009-01-19 15:20:57 -08001920 netdev->features |= NETIF_F_GRO;
Alexander Duyckd3352522008-07-08 15:12:13 -07001921
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001922 netdev->vlan_features |= NETIF_F_TSO;
1923 netdev->vlan_features |= NETIF_F_TSO6;
Alexander Duyck7d8eb292009-02-06 23:18:27 +00001924 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00001925 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsher48f29ff2008-06-05 04:06:27 -07001926 netdev->vlan_features |= NETIF_F_SG;
1927
Yi Zou7b872a52010-09-22 17:57:58 +00001928 if (pci_using_dac) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001929 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001930 netdev->vlan_features |= NETIF_F_HIGHDMA;
1931 }
Auke Kok9d5c8242008-01-24 02:22:38 -08001932
Alexander Duyck5b043fb2009-10-27 23:52:31 +00001933 if (hw->mac.type >= e1000_82576)
Jesse Brandeburgb9473562009-04-27 22:36:13 +00001934 netdev->features |= NETIF_F_SCTP_CSUM;
1935
Alexander Duyck330a6d62009-10-27 23:51:35 +00001936 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08001937
1938 /* before reading the NVM, reset the controller to put the device in a
1939 * known good starting state */
1940 hw->mac.ops.reset_hw(hw);
1941
1942 /* make sure the NVM is good */
Carolyn Wyborny4322e562011-03-11 20:43:18 -08001943 if (hw->nvm.ops.validate(hw) < 0) {
Auke Kok9d5c8242008-01-24 02:22:38 -08001944 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1945 err = -EIO;
1946 goto err_eeprom;
1947 }
1948
1949 /* copy the MAC address out of the NVM */
1950 if (hw->mac.ops.read_mac_addr(hw))
1951 dev_err(&pdev->dev, "NVM Read Error\n");
1952
1953 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1954 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1955
1956 if (!is_valid_ether_addr(netdev->perm_addr)) {
1957 dev_err(&pdev->dev, "Invalid MAC Address\n");
1958 err = -EIO;
1959 goto err_eeprom;
1960 }
1961
Joe Perchesc061b182010-08-23 18:20:03 +00001962 setup_timer(&adapter->watchdog_timer, igb_watchdog,
Alexander Duyck0e340482009-03-20 00:17:08 +00001963 (unsigned long) adapter);
Joe Perchesc061b182010-08-23 18:20:03 +00001964 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
Alexander Duyck0e340482009-03-20 00:17:08 +00001965 (unsigned long) adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08001966
1967 INIT_WORK(&adapter->reset_task, igb_reset_task);
1968 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1969
Alexander Duyck450c87c2009-02-06 23:22:11 +00001970 /* Initialize link properties that are user-changeable */
Auke Kok9d5c8242008-01-24 02:22:38 -08001971 adapter->fc_autoneg = true;
1972 hw->mac.autoneg = true;
1973 hw->phy.autoneg_advertised = 0x2f;
1974
Alexander Duyck0cce1192009-07-23 18:10:24 +00001975 hw->fc.requested_mode = e1000_fc_default;
1976 hw->fc.current_mode = e1000_fc_default;
Auke Kok9d5c8242008-01-24 02:22:38 -08001977
Auke Kok9d5c8242008-01-24 02:22:38 -08001978 igb_validate_mdi_setting(hw);
1979
Auke Kok9d5c8242008-01-24 02:22:38 -08001980 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
1981 * enable the ACPI Magic Packet filter
1982 */
1983
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001984 if (hw->bus.func == 0)
Alexander Duyck312c75a2009-02-06 23:17:47 +00001985 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
Alexander Duyck55cac242009-11-19 12:42:21 +00001986 else if (hw->mac.type == e1000_82580)
1987 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1988 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1989 &eeprom_data);
Alexander Duycka2cf8b62009-03-13 20:41:17 +00001990 else if (hw->bus.func == 1)
1991 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
Auke Kok9d5c8242008-01-24 02:22:38 -08001992
1993 if (eeprom_data & eeprom_apme_mask)
1994 adapter->eeprom_wol |= E1000_WUFC_MAG;
1995
1996 /* now that we have the eeprom settings, apply the special cases where
1997 * the eeprom may be wrong or the board simply won't support wake on
1998 * lan on a particular port */
1999 switch (pdev->device) {
2000 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2001 adapter->eeprom_wol = 0;
2002 break;
2003 case E1000_DEV_ID_82575EB_FIBER_SERDES:
Alexander Duyck2d064c02008-07-08 15:10:12 -07002004 case E1000_DEV_ID_82576_FIBER:
2005 case E1000_DEV_ID_82576_SERDES:
Auke Kok9d5c8242008-01-24 02:22:38 -08002006 /* Wake events only supported on port A for dual fiber
2007 * regardless of eeprom setting */
2008 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2009 adapter->eeprom_wol = 0;
2010 break;
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002011 case E1000_DEV_ID_82576_QUAD_COPPER:
Stefan Assmannd5aa2252010-04-09 09:51:34 +00002012 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
Alexander Duyckc8ea5ea2009-03-13 20:42:35 +00002013 /* if quad port adapter, disable WoL on all but port A */
2014 if (global_quad_port_a != 0)
2015 adapter->eeprom_wol = 0;
2016 else
2017 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2018 /* Reset for multiple quad port adapters */
2019 if (++global_quad_port_a == 4)
2020 global_quad_port_a = 0;
2021 break;
Auke Kok9d5c8242008-01-24 02:22:38 -08002022 }
2023
2024 /* initialize the wol settings based on the eeprom settings */
2025 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\e1b86d82008-11-07 20:30:37 +00002026 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Auke Kok9d5c8242008-01-24 02:22:38 -08002027
2028 /* reset the hardware with the new settings */
2029 igb_reset(adapter);
2030
2031 /* let the f/w know that the h/w is now under the control of the
2032 * driver. */
2033 igb_get_hw_control(adapter);
2034
Auke Kok9d5c8242008-01-24 02:22:38 -08002035 strcpy(netdev->name, "eth%d");
2036 err = register_netdev(netdev);
2037 if (err)
2038 goto err_register;
2039
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002040 /* carrier off reporting is important to ethtool even BEFORE open */
2041 netif_carrier_off(netdev);
2042
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002043#ifdef CONFIG_IGB_DCA
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08002044 if (dca_add_requester(&pdev->dev) == 0) {
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002045 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002046 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002047 igb_setup_dca(adapter);
2048 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00002049
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002050#endif
Auke Kok9d5c8242008-01-24 02:22:38 -08002051 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2052 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07002053 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08002054 netdev->name,
Alexander Duyck559e9c42009-10-27 23:52:50 +00002055 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
Alexander Duyckff846f52010-04-27 01:02:40 +00002056 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
Alexander Duyck559e9c42009-10-27 23:52:50 +00002057 "unknown"),
Alexander Duyck59c3de82009-03-31 20:38:00 +00002058 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2059 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2060 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2061 "unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07002062 netdev->dev_addr);
Auke Kok9d5c8242008-01-24 02:22:38 -08002063
Carolyn Wyborny9835fd72010-11-22 17:17:21 +00002064 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2065 if (ret_val)
2066 strcpy(part_str, "Unknown");
2067 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
Auke Kok9d5c8242008-01-24 02:22:38 -08002068 dev_info(&pdev->dev,
2069 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2070 adapter->msix_entries ? "MSI-X" :
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002071 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
Auke Kok9d5c8242008-01-24 02:22:38 -08002072 adapter->num_rx_queues, adapter->num_tx_queues);
Carolyn Wyborny09b068d2011-03-11 20:42:13 -08002073 switch (hw->mac.type) {
2074 case e1000_i350:
2075 igb_set_eee_i350(hw);
2076 break;
2077 default:
2078 break;
2079 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002080 return 0;
2081
2082err_register:
2083 igb_release_hw_control(adapter);
2084err_eeprom:
2085 if (!igb_check_reset_block(hw))
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08002086 igb_reset_phy(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08002087
2088 if (hw->flash_address)
2089 iounmap(hw->flash_address);
Auke Kok9d5c8242008-01-24 02:22:38 -08002090err_sw_init:
Alexander Duyck047e0032009-10-27 15:49:27 +00002091 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002092 iounmap(hw->hw_addr);
2093err_ioremap:
2094 free_netdev(netdev);
2095err_alloc_etherdev:
Alexander Duyck559e9c42009-10-27 23:52:50 +00002096 pci_release_selected_regions(pdev,
2097 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002098err_pci_reg:
2099err_dma:
2100 pci_disable_device(pdev);
2101 return err;
2102}
2103
2104/**
2105 * igb_remove - Device Removal Routine
2106 * @pdev: PCI device information struct
2107 *
2108 * igb_remove is called by the PCI subsystem to alert the driver
2109 * that it should release a PCI device. The could be caused by a
2110 * Hot-Plug event, or because the driver is going to be removed from
2111 * memory.
2112 **/
2113static void __devexit igb_remove(struct pci_dev *pdev)
2114{
2115 struct net_device *netdev = pci_get_drvdata(pdev);
2116 struct igb_adapter *adapter = netdev_priv(netdev);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002117 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08002118
Tejun Heo760141a2010-12-12 16:45:14 +01002119 /*
2120 * The watchdog timer may be rescheduled, so explicitly
2121 * disable watchdog from being rescheduled.
2122 */
Auke Kok9d5c8242008-01-24 02:22:38 -08002123 set_bit(__IGB_DOWN, &adapter->state);
2124 del_timer_sync(&adapter->watchdog_timer);
2125 del_timer_sync(&adapter->phy_info_timer);
2126
Tejun Heo760141a2010-12-12 16:45:14 +01002127 cancel_work_sync(&adapter->reset_task);
2128 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002129
Jeff Kirsher421e02f2008-10-17 11:08:31 -07002130#ifdef CONFIG_IGB_DCA
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002131 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002132 dev_info(&pdev->dev, "DCA disabled\n");
2133 dca_remove_requester(&pdev->dev);
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07002134 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08002135 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07002136 }
2137#endif
2138
Auke Kok9d5c8242008-01-24 02:22:38 -08002139 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2140 * would have already happened in close and is redundant. */
2141 igb_release_hw_control(adapter);
2142
2143 unregister_netdev(netdev);
2144
Alexander Duyck047e0032009-10-27 15:49:27 +00002145 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002146
Alexander Duyck37680112009-02-19 20:40:30 -08002147#ifdef CONFIG_PCI_IOV
2148 /* reclaim resources allocated to VFs */
2149 if (adapter->vf_data) {
2150 /* disable iov and allow time for transactions to clear */
2151 pci_disable_sriov(pdev);
2152 msleep(500);
2153
2154 kfree(adapter->vf_data);
2155 adapter->vf_data = NULL;
2156 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2157 msleep(100);
2158 dev_info(&pdev->dev, "IOV Disabled\n");
2159 }
2160#endif
Alexander Duyck559e9c42009-10-27 23:52:50 +00002161
Alexander Duyck28b07592009-02-06 23:20:31 +00002162 iounmap(hw->hw_addr);
2163 if (hw->flash_address)
2164 iounmap(hw->flash_address);
Alexander Duyck559e9c42009-10-27 23:52:50 +00002165 pci_release_selected_regions(pdev,
2166 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9d5c8242008-01-24 02:22:38 -08002167
2168 free_netdev(netdev);
2169
Frans Pop19d5afd2009-10-02 10:04:12 -07002170 pci_disable_pcie_error_reporting(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08002171
Auke Kok9d5c8242008-01-24 02:22:38 -08002172 pci_disable_device(pdev);
2173}
2174
2175/**
Alexander Duycka6b623e2009-10-27 23:47:53 +00002176 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2177 * @adapter: board private structure to initialize
2178 *
2179 * This function initializes the vf specific data storage and then attempts to
2180 * allocate the VFs. The reason for ordering it this way is because it is much
 2181 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2182 * the memory for the VFs.
2183 **/
2184static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2185{
2186#ifdef CONFIG_PCI_IOV
2187 struct pci_dev *pdev = adapter->pdev;
2188
Alexander Duycka6b623e2009-10-27 23:47:53 +00002189 if (adapter->vfs_allocated_count) {
2190 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2191 sizeof(struct vf_data_storage),
2192 GFP_KERNEL);
2193 /* if allocation failed then we do not support SR-IOV */
2194 if (!adapter->vf_data) {
2195 adapter->vfs_allocated_count = 0;
2196 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2197 "Data Storage\n");
2198 }
2199 }
2200
2201 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
2202 kfree(adapter->vf_data);
2203 adapter->vf_data = NULL;
2204#endif /* CONFIG_PCI_IOV */
2205 adapter->vfs_allocated_count = 0;
2206#ifdef CONFIG_PCI_IOV
2207 } else {
2208 unsigned char mac_addr[ETH_ALEN];
2209 int i;
2210 dev_info(&pdev->dev, "%d vfs allocated\n",
2211 adapter->vfs_allocated_count);
2212 for (i = 0; i < adapter->vfs_allocated_count; i++) {
2213 random_ether_addr(mac_addr);
2214 igb_set_vf_mac(adapter, i, mac_addr);
2215 }
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002216 /* DMA Coalescing is not supported in IOV mode. */
2217 if (adapter->flags & IGB_FLAG_DMAC)
2218 adapter->flags &= ~IGB_FLAG_DMAC;
Alexander Duycka6b623e2009-10-27 23:47:53 +00002219 }
2220#endif /* CONFIG_PCI_IOV */
2221}
2222
Alexander Duyck115f4592009-11-12 18:37:00 +00002223
2224/**
2225 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2226 * @adapter: board private structure to initialize
2227 *
2228 * igb_init_hw_timer initializes the function pointer and values for the hw
2229 * timer found in hardware.
2230 **/
2231static void igb_init_hw_timer(struct igb_adapter *adapter)
2232{
2233 struct e1000_hw *hw = &adapter->hw;
2234
2235 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002236 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002237 case e1000_82580:
2238 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2239 adapter->cycles.read = igb_read_clock;
2240 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2241 adapter->cycles.mult = 1;
2242 /*
2243 * The 82580 timesync updates the system timer every 8ns by 8ns
2244 * and the value cannot be shifted. Instead we need to shift
2245 * the registers to generate a 64bit timer value. As a result
2246 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2247 * 24 in order to generate a larger value for synchronization.
2248 */
2249 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2250 /* disable system timer temporarily by setting bit 31 */
2251 wr32(E1000_TSAUXC, 0x80000000);
2252 wrfl();
2253
2254 /* Set registers so that rollover occurs soon to test this. */
2255 wr32(E1000_SYSTIMR, 0x00000000);
2256 wr32(E1000_SYSTIML, 0x80000000);
2257 wr32(E1000_SYSTIMH, 0x000000FF);
2258 wrfl();
2259
2260 /* enable system timer by clearing bit 31 */
2261 wr32(E1000_TSAUXC, 0x0);
2262 wrfl();
2263
2264 timecounter_init(&adapter->clock,
2265 &adapter->cycles,
2266 ktime_to_ns(ktime_get_real()));
2267 /*
2268 * Synchronize our NIC clock against system wall clock. NIC
2269 * time stamp reading requires ~3us per sample, each sample
2270 * was pretty stable even under load => only require 10
2271 * samples for each offset comparison.
2272 */
2273 memset(&adapter->compare, 0, sizeof(adapter->compare));
2274 adapter->compare.source = &adapter->clock;
2275 adapter->compare.target = ktime_get_real;
2276 adapter->compare.num_samples = 10;
2277 timecompare_update(&adapter->compare, 0);
2278 break;
Alexander Duyck115f4592009-11-12 18:37:00 +00002279 case e1000_82576:
2280 /*
2281 * Initialize hardware timer: we keep it running just in case
2282 * that some program needs it later on.
2283 */
2284 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2285 adapter->cycles.read = igb_read_clock;
2286 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2287 adapter->cycles.mult = 1;
 2288		/*
2289 * Scale the NIC clock cycle by a large factor so that
2290 * relatively small clock corrections can be added or
 2291		 * subtracted at each clock tick. The drawbacks of a large
2292 * factor are a) that the clock register overflows more quickly
2293 * (not such a big deal) and b) that the increment per tick has
2294 * to fit into 24 bits. As a result we need to use a shift of
2295 * 19 so we can fit a value of 16 into the TIMINCA register.
2296 */
2297 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2298 wr32(E1000_TIMINCA,
2299 (1 << E1000_TIMINCA_16NS_SHIFT) |
2300 (16 << IGB_82576_TSYNC_SHIFT));
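		/*
		 * Sketch of the math: every 16 ns SYSTIM advances by
		 * 16 << 19, so one nanosecond corresponds to 2^19 counter
		 * units, and the timecounter (mult = 1, shift = 19)
		 * converts back with ns = counter >> 19.
		 */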
2301
2302 /* Set registers so that rollover occurs soon to test this. */
2303 wr32(E1000_SYSTIML, 0x00000000);
2304 wr32(E1000_SYSTIMH, 0xFF800000);
2305 wrfl();
2306
2307 timecounter_init(&adapter->clock,
2308 &adapter->cycles,
2309 ktime_to_ns(ktime_get_real()));
2310 /*
2311 * Synchronize our NIC clock against system wall clock. NIC
2312 * time stamp reading requires ~3us per sample, each sample
2313 * was pretty stable even under load => only require 10
2314 * samples for each offset comparison.
2315 */
2316 memset(&adapter->compare, 0, sizeof(adapter->compare));
2317 adapter->compare.source = &adapter->clock;
2318 adapter->compare.target = ktime_get_real;
2319 adapter->compare.num_samples = 10;
2320 timecompare_update(&adapter->compare, 0);
2321 break;
2322 case e1000_82575:
2323 /* 82575 does not support timesync */
2324 default:
2325 break;
2326 }
2327
2328}
2329
Alexander Duycka6b623e2009-10-27 23:47:53 +00002330/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002331 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2332 * @adapter: board private structure to initialize
2333 *
2334 * igb_sw_init initializes the Adapter private data structure.
2335 * Fields are initialized based on PCI device information and
2336 * OS network device settings (MTU size).
2337 **/
2338static int __devinit igb_sw_init(struct igb_adapter *adapter)
2339{
2340 struct e1000_hw *hw = &adapter->hw;
2341 struct net_device *netdev = adapter->netdev;
2342 struct pci_dev *pdev = adapter->pdev;
2343
2344 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2345
Alexander Duyck68fd9912008-11-20 00:48:10 -08002346 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2347 adapter->rx_ring_count = IGB_DEFAULT_RXD;
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00002348 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2349 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2350
Auke Kok9d5c8242008-01-24 02:22:38 -08002351 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2352 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
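	/* e.g. for the default 1500-byte MTU: max_frame_size =
	 * 1500 + 14 + 4 = 1518 bytes and min_frame_size = 60 + 4 = 64 bytes
	 */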
2353
Eric Dumazet12dcd862010-10-15 17:27:10 +00002354 spin_lock_init(&adapter->stats64_lock);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002355#ifdef CONFIG_PCI_IOV
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002356 switch (hw->mac.type) {
2357 case e1000_82576:
2358 case e1000_i350:
Stefan Assmann9b082d72011-02-24 20:03:31 +00002359 if (max_vfs > 7) {
2360 dev_warn(&pdev->dev,
2361 "Maximum of 7 VFs per PF, using max\n");
2362 adapter->vfs_allocated_count = 7;
2363 } else
2364 adapter->vfs_allocated_count = max_vfs;
Carolyn Wyborny6b78bb12011-01-20 06:40:45 +00002365 break;
2366 default:
2367 break;
2368 }
Alexander Duycka6b623e2009-10-27 23:47:53 +00002369#endif /* CONFIG_PCI_IOV */
Alexander Duycka99955f2009-11-12 18:37:19 +00002370 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2371
2372 /*
2373 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
2374 * then we should combine the queues into a queue pair in order to
2375 * conserve interrupts due to limited supply
2376 */
2377 if ((adapter->rss_queues > 4) ||
2378 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2379 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
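	/*
	 * e.g. on an 8-core system rss_queues = 8, which exceeds 4, so each
	 * Tx/Rx ring pair shares a single q_vector, roughly halving the
	 * number of MSI-X vectors requested (illustrative).
	 */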
2380
Alexander Duycka6b623e2009-10-27 23:47:53 +00002381 /* This call may decrease the number of queues */
Alexander Duyck047e0032009-10-27 15:49:27 +00002382 if (igb_init_interrupt_scheme(adapter)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08002383 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2384 return -ENOMEM;
2385 }
2386
Alexander Duyck115f4592009-11-12 18:37:00 +00002387 igb_init_hw_timer(adapter);
Alexander Duycka6b623e2009-10-27 23:47:53 +00002388 igb_probe_vfs(adapter);
2389
Auke Kok9d5c8242008-01-24 02:22:38 -08002390 /* Explicitly disable IRQ since the NIC can be in any state. */
2391 igb_irq_disable(adapter);
2392
Carolyn Wyborny831ec0b2011-03-11 20:43:54 -08002393 if (hw->mac.type == e1000_i350)
2394 adapter->flags &= ~IGB_FLAG_DMAC;
2395
Auke Kok9d5c8242008-01-24 02:22:38 -08002396 set_bit(__IGB_DOWN, &adapter->state);
2397 return 0;
2398}
2399
2400/**
2401 * igb_open - Called when a network interface is made active
2402 * @netdev: network interface device structure
2403 *
2404 * Returns 0 on success, negative value on failure
2405 *
2406 * The open entry point is called when a network interface is made
2407 * active by the system (IFF_UP). At this point all resources needed
2408 * for transmit and receive operations are allocated, the interrupt
2409 * handler is registered with the OS, the watchdog timer is started,
2410 * and the stack is notified that the interface is ready.
2411 **/
2412static int igb_open(struct net_device *netdev)
2413{
2414 struct igb_adapter *adapter = netdev_priv(netdev);
2415 struct e1000_hw *hw = &adapter->hw;
2416 int err;
2417 int i;
2418
2419 /* disallow open during test */
2420 if (test_bit(__IGB_TESTING, &adapter->state))
2421 return -EBUSY;
2422
Jesse Brandeburgb168dfc2009-04-17 20:44:32 +00002423 netif_carrier_off(netdev);
2424
Auke Kok9d5c8242008-01-24 02:22:38 -08002425 /* allocate transmit descriptors */
2426 err = igb_setup_all_tx_resources(adapter);
2427 if (err)
2428 goto err_setup_tx;
2429
2430 /* allocate receive descriptors */
2431 err = igb_setup_all_rx_resources(adapter);
2432 if (err)
2433 goto err_setup_rx;
2434
Nick Nunley88a268c2010-02-17 01:01:59 +00002435 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002436
Auke Kok9d5c8242008-01-24 02:22:38 -08002437 /* before we allocate an interrupt, we must be ready to handle it.
2438 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 2439	 * as soon as we call request_irq, so we have to setup our
2440 * clean_rx handler before we do so. */
2441 igb_configure(adapter);
2442
2443 err = igb_request_irq(adapter);
2444 if (err)
2445 goto err_req_irq;
2446
2447 /* From here on the code is the same as igb_up() */
2448 clear_bit(__IGB_DOWN, &adapter->state);
2449
Alexander Duyck047e0032009-10-27 15:49:27 +00002450 for (i = 0; i < adapter->num_q_vectors; i++) {
2451 struct igb_q_vector *q_vector = adapter->q_vector[i];
2452 napi_enable(&q_vector->napi);
2453 }
Auke Kok9d5c8242008-01-24 02:22:38 -08002454
2455 /* Clear any pending interrupts. */
2456 rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07002457
2458 igb_irq_enable(adapter);
2459
Alexander Duyckd4960302009-10-27 15:53:45 +00002460 /* notify VFs that reset has been completed */
2461 if (adapter->vfs_allocated_count) {
2462 u32 reg_data = rd32(E1000_CTRL_EXT);
2463 reg_data |= E1000_CTRL_EXT_PFRSTD;
2464 wr32(E1000_CTRL_EXT, reg_data);
2465 }
2466
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002467 netif_tx_start_all_queues(netdev);
2468
Alexander Duyck25568a52009-10-27 23:49:59 +00002469 /* start the watchdog. */
2470 hw->mac.get_link_status = 1;
2471 schedule_work(&adapter->watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08002472
2473 return 0;
2474
2475err_req_irq:
2476 igb_release_hw_control(adapter);
Nick Nunley88a268c2010-02-17 01:01:59 +00002477 igb_power_down_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08002478 igb_free_all_rx_resources(adapter);
2479err_setup_rx:
2480 igb_free_all_tx_resources(adapter);
2481err_setup_tx:
2482 igb_reset(adapter);
2483
2484 return err;
2485}
2486
2487/**
2488 * igb_close - Disables a network interface
2489 * @netdev: network interface device structure
2490 *
2491 * Returns 0, this is not allowed to fail
2492 *
2493 * The close entry point is called when an interface is de-activated
2494 * by the OS. The hardware is still under the driver's control, but
2495 * needs to be disabled. A global MAC reset is issued to stop the
2496 * hardware, and all transmit and receive resources are freed.
2497 **/
2498static int igb_close(struct net_device *netdev)
2499{
2500 struct igb_adapter *adapter = netdev_priv(netdev);
2501
2502 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2503 igb_down(adapter);
2504
2505 igb_free_irq(adapter);
2506
2507 igb_free_all_tx_resources(adapter);
2508 igb_free_all_rx_resources(adapter);
2509
Auke Kok9d5c8242008-01-24 02:22:38 -08002510 return 0;
2511}
2512
2513/**
2514 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002515 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2516 *
2517 * Return 0 on success, negative on failure
2518 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002519int igb_setup_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002520{
Alexander Duyck59d71982010-04-27 13:09:25 +00002521 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002522 int size;
2523
2524 size = sizeof(struct igb_buffer) * tx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00002525 tx_ring->buffer_info = vzalloc(size);
Auke Kok9d5c8242008-01-24 02:22:38 -08002526 if (!tx_ring->buffer_info)
2527 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002528
2529 /* round up to nearest 4K */
Alexander Duyck85e8d002009-02-16 00:00:20 -08002530 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
Auke Kok9d5c8242008-01-24 02:22:38 -08002531 tx_ring->size = ALIGN(tx_ring->size, 4096);
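	/*
	 * e.g. with the default 256-descriptor ring (assuming
	 * IGB_DEFAULT_TXD = 256): 256 * 16 bytes = 4096, already one
	 * whole 4 KB page.
	 */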
2532
Alexander Duyck59d71982010-04-27 13:09:25 +00002533 tx_ring->desc = dma_alloc_coherent(dev,
2534 tx_ring->size,
2535 &tx_ring->dma,
2536 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002537
2538 if (!tx_ring->desc)
2539 goto err;
2540
Auke Kok9d5c8242008-01-24 02:22:38 -08002541 tx_ring->next_to_use = 0;
2542 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002543 return 0;
2544
2545err:
2546 vfree(tx_ring->buffer_info);
Alexander Duyck59d71982010-04-27 13:09:25 +00002547 dev_err(dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002548 "Unable to allocate memory for the transmit descriptor ring\n");
2549 return -ENOMEM;
2550}
2551
2552/**
2553 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2554 * (Descriptors) for all queues
2555 * @adapter: board private structure
2556 *
2557 * Return 0 on success, negative on failure
2558 **/
2559static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2560{
Alexander Duyck439705e2009-10-27 23:49:20 +00002561 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002562 int i, err = 0;
2563
2564 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002565 err = igb_setup_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002566 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002567 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002568 "Allocation for Tx Queue %u failed\n", i);
2569 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002570 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002571 break;
2572 }
2573 }
2574
Alexander Duycka99955f2009-11-12 18:37:19 +00002575 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002576 int r_idx = i % adapter->num_tx_queues;
Alexander Duyck3025a442010-02-17 01:02:39 +00002577 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00002578 }
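	/*
	 * Round-robin example: with 4 real Tx queues, absolute queue 5 maps
	 * to tx_ring[5 % 4] = tx_ring[1] (illustrative).
	 */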
Auke Kok9d5c8242008-01-24 02:22:38 -08002579 return err;
2580}
2581
2582/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002583 * igb_setup_tctl - configure the transmit control registers
2584 * @adapter: Board private structure
Auke Kok9d5c8242008-01-24 02:22:38 -08002585 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002586void igb_setup_tctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002587{
Auke Kok9d5c8242008-01-24 02:22:38 -08002588 struct e1000_hw *hw = &adapter->hw;
2589 u32 tctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002590
Alexander Duyck85b430b2009-10-27 15:50:29 +00002591 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2592 wr32(E1000_TXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002593
2594 /* Program the Transmit Control Register */
Auke Kok9d5c8242008-01-24 02:22:38 -08002595 tctl = rd32(E1000_TCTL);
2596 tctl &= ~E1000_TCTL_CT;
2597 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2598 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2599
2600 igb_config_collision_dist(hw);
2601
Auke Kok9d5c8242008-01-24 02:22:38 -08002602 /* Enable transmits */
2603 tctl |= E1000_TCTL_EN;
2604
2605 wr32(E1000_TCTL, tctl);
2606}
2607
2608/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002609 * igb_configure_tx_ring - Configure transmit ring after Reset
2610 * @adapter: board private structure
2611 * @ring: tx ring to configure
2612 *
2613 * Configure a transmit ring after a reset.
2614 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002615void igb_configure_tx_ring(struct igb_adapter *adapter,
2616 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002617{
2618 struct e1000_hw *hw = &adapter->hw;
2619 u32 txdctl;
2620 u64 tdba = ring->dma;
2621 int reg_idx = ring->reg_idx;
2622
2623 /* disable the queue */
2624 txdctl = rd32(E1000_TXDCTL(reg_idx));
2625 wr32(E1000_TXDCTL(reg_idx),
2626 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2627 wrfl();
2628 mdelay(10);
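	/*
	 * The 10 ms busy-wait gives the queue time to quiesce before its
	 * base, length and thresholds are reprogrammed (assumed to be a
	 * conservative settling delay).
	 */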
2629
2630 wr32(E1000_TDLEN(reg_idx),
2631 ring->count * sizeof(union e1000_adv_tx_desc));
2632 wr32(E1000_TDBAL(reg_idx),
2633 tdba & 0x00000000ffffffffULL);
2634 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2635
Alexander Duyckfce99e32009-10-27 15:51:27 +00002636 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2637 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2638 writel(0, ring->head);
2639 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002640
2641 txdctl |= IGB_TX_PTHRESH;
2642 txdctl |= IGB_TX_HTHRESH << 8;
2643 txdctl |= IGB_TX_WTHRESH << 16;
2644
2645 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2646 wr32(E1000_TXDCTL(reg_idx), txdctl);
2647}
2648
2649/**
2650 * igb_configure_tx - Configure transmit Unit after Reset
2651 * @adapter: board private structure
2652 *
2653 * Configure the Tx unit of the MAC after a reset.
2654 **/
2655static void igb_configure_tx(struct igb_adapter *adapter)
2656{
2657 int i;
2658
2659 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002660 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002661}
2662
2663/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002664 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
Auke Kok9d5c8242008-01-24 02:22:38 -08002665 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2666 *
2667 * Returns 0 on success, negative on failure
2668 **/
Alexander Duyck80785292009-10-27 15:51:47 +00002669int igb_setup_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08002670{
Alexander Duyck59d71982010-04-27 13:09:25 +00002671 struct device *dev = rx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002672 int size, desc_len;
2673
2674 size = sizeof(struct igb_buffer) * rx_ring->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00002675 rx_ring->buffer_info = vzalloc(size);
Auke Kok9d5c8242008-01-24 02:22:38 -08002676 if (!rx_ring->buffer_info)
2677 goto err;
Auke Kok9d5c8242008-01-24 02:22:38 -08002678
2679 desc_len = sizeof(union e1000_adv_rx_desc);
2680
2681 /* Round up to nearest 4K */
2682 rx_ring->size = rx_ring->count * desc_len;
2683 rx_ring->size = ALIGN(rx_ring->size, 4096);
2684
Alexander Duyck59d71982010-04-27 13:09:25 +00002685 rx_ring->desc = dma_alloc_coherent(dev,
2686 rx_ring->size,
2687 &rx_ring->dma,
2688 GFP_KERNEL);
Auke Kok9d5c8242008-01-24 02:22:38 -08002689
2690 if (!rx_ring->desc)
2691 goto err;
2692
2693 rx_ring->next_to_clean = 0;
2694 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08002695
Auke Kok9d5c8242008-01-24 02:22:38 -08002696 return 0;
2697
2698err:
2699 vfree(rx_ring->buffer_info);
Alexander Duyck439705e2009-10-27 23:49:20 +00002700 rx_ring->buffer_info = NULL;
Alexander Duyck59d71982010-04-27 13:09:25 +00002701 dev_err(dev, "Unable to allocate memory for the receive descriptor"
2702 " ring\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08002703 return -ENOMEM;
2704}
2705
2706/**
2707 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2708 * (Descriptors) for all queues
2709 * @adapter: board private structure
2710 *
2711 * Return 0 on success, negative on failure
2712 **/
2713static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2714{
Alexander Duyck439705e2009-10-27 23:49:20 +00002715 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08002716 int i, err = 0;
2717
2718 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00002719 err = igb_setup_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002720 if (err) {
Alexander Duyck439705e2009-10-27 23:49:20 +00002721 dev_err(&pdev->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08002722 "Allocation for Rx Queue %u failed\n", i);
2723 for (i--; i >= 0; i--)
Alexander Duyck3025a442010-02-17 01:02:39 +00002724 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002725 break;
2726 }
2727 }
2728
2729 return err;
2730}
2731
2732/**
Alexander Duyck06cf2662009-10-27 15:53:25 +00002733 * igb_setup_mrqc - configure the multiple receive queue control registers
2734 * @adapter: Board private structure
2735 **/
2736static void igb_setup_mrqc(struct igb_adapter *adapter)
2737{
2738 struct e1000_hw *hw = &adapter->hw;
2739 u32 mrqc, rxcsum;
2740 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2741 union e1000_reta {
2742 u32 dword;
2743 u8 bytes[4];
2744 } reta;
2745 static const u8 rsshash[40] = {
2746 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2747 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2748 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2749 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2750
2751 /* Fill out hash function seeds */
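	/* each of the ten RSSRK registers takes four key bytes packed
	 * little-endian, so the 40-byte key above fills RSSRK(0)-RSSRK(9) */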
2752 for (j = 0; j < 10; j++) {
2753 u32 rsskey = rsshash[(j * 4)];
2754 rsskey |= rsshash[(j * 4) + 1] << 8;
2755 rsskey |= rsshash[(j * 4) + 2] << 16;
2756 rsskey |= rsshash[(j * 4) + 3] << 24;
2757 array_wr32(E1000_RSSRK(0), j, rsskey);
2758 }
2759
Alexander Duycka99955f2009-11-12 18:37:19 +00002760 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002761
2762 if (adapter->vfs_allocated_count) {
 2763		/* 82575 and 82576 support 2 RSS queues for VMDq */
2764 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002765 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002766 case e1000_82580:
2767 num_rx_queues = 1;
2768 shift = 0;
2769 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002770 case e1000_82576:
2771 shift = 3;
2772 num_rx_queues = 2;
2773 break;
2774 case e1000_82575:
2775 shift = 2;
2776 shift2 = 6;
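			/* fall through */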
2777 default:
2778 break;
2779 }
2780 } else {
2781 if (hw->mac.type == e1000_82575)
2782 shift = 6;
2783 }
2784
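	/* the redirection table holds 32 * 4 = 128 entries, written one
	 * 32-bit RETA register (four entries) at a time; entry j steers
	 * hash bucket j to queue (j % num_rx_queues), shifted into the
	 * bit position the current MAC/VMDq mode expects */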
2785 for (j = 0; j < (32 * 4); j++) {
2786 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2787 if (shift2)
2788 reta.bytes[j & 3] |= num_rx_queues << shift2;
2789 if ((j & 3) == 3)
2790 wr32(E1000_RETA(j >> 2), reta.dword);
2791 }
2792
2793 /*
2794 * Disable raw packet checksumming so that RSS hash is placed in
2795 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2796 * offloads as they are enabled by default
2797 */
2798 rxcsum = rd32(E1000_RXCSUM);
2799 rxcsum |= E1000_RXCSUM_PCSD;
2800
2801 if (adapter->hw.mac.type >= e1000_82576)
2802 /* Enable Receive Checksum Offload for SCTP */
2803 rxcsum |= E1000_RXCSUM_CRCOFL;
2804
2805 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2806 wr32(E1000_RXCSUM, rxcsum);
2807
2808 /* If VMDq is enabled then we set the appropriate mode for that, else
2809 * we default to RSS so that an RSS hash is calculated per packet even
2810 * if we are only using one queue */
2811 if (adapter->vfs_allocated_count) {
2812 if (hw->mac.type > e1000_82575) {
2813 /* Set the default pool for the PF's first queue */
2814 u32 vtctl = rd32(E1000_VT_CTL);
2815 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2816 E1000_VT_CTL_DISABLE_DEF_POOL);
2817 vtctl |= adapter->vfs_allocated_count <<
2818 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2819 wr32(E1000_VT_CTL, vtctl);
2820 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002821 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002822 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2823 else
2824 mrqc = E1000_MRQC_ENABLE_VMDQ;
2825 } else {
2826 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2827 }
2828 igb_vmm_control(adapter);
2829
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002830 /*
2831 * Generate RSS hash based on TCP port numbers and/or
2832 * IPv4/v6 src and dst addresses since UDP cannot be
2833 * hashed reliably due to IP fragmentation
2834 */
2835 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2836 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2837 E1000_MRQC_RSS_FIELD_IPV6 |
2838 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2839 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002840
2841 wr32(E1000_MRQC, mrqc);
2842}
2843
2844/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002845 * igb_setup_rctl - configure the receive control registers
2846 * @adapter: Board private structure
2847 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002848void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002849{
2850 struct e1000_hw *hw = &adapter->hw;
2851 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002852
2853 rctl = rd32(E1000_RCTL);
2854
2855 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002856 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002857
Alexander Duyck69d728b2008-11-25 01:04:03 -08002858 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002859 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002860
Auke Kok87cb7e82008-07-08 15:08:29 -07002861 /*
2862 * enable stripping of CRC. It's unlikely this will break BMC
2863 * redirection as it did with e1000. Newer features require
2864 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002865 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002866 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002867
Alexander Duyck559e9c42009-10-27 23:52:50 +00002868 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002869 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002870
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002871 /* enable LPE to prevent packets larger than max_frame_size */
2872 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002873
Alexander Duyck952f72a2009-10-27 15:51:07 +00002874 /* disable queue 0 to prevent tail write w/o re-config */
2875 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002876
Alexander Duycke1739522009-02-19 20:39:44 -08002877 /* Attention!!! For SR-IOV PF driver operations you must enable
 2878	 * queue drop for all VF and PF queues to prevent head-of-line blocking
 2879	 * if an untrusted VF does not provide descriptors to hardware.
2880 */
2881 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002882 /* set all queue drop enable bits */
2883 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002884 }
2885
Auke Kok9d5c8242008-01-24 02:22:38 -08002886 wr32(E1000_RCTL, rctl);
2887}
2888
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002889static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2890 int vfn)
2891{
2892 struct e1000_hw *hw = &adapter->hw;
2893 u32 vmolr;
2894
 2895	/* if it isn't the PF, check to see if VFs are enabled and
 2896	 * increase the size to support VLAN tags */
2897 if (vfn < adapter->vfs_allocated_count &&
2898 adapter->vf_data[vfn].vlans_enabled)
2899 size += VLAN_TAG_SIZE;
2900
2901 vmolr = rd32(E1000_VMOLR(vfn));
2902 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2903 vmolr |= size | E1000_VMOLR_LPE;
2904 wr32(E1000_VMOLR(vfn), vmolr);
2905
2906 return 0;
2907}
2908
Auke Kok9d5c8242008-01-24 02:22:38 -08002909/**
Alexander Duycke1739522009-02-19 20:39:44 -08002910 * igb_rlpml_set - set maximum receive packet size
2911 * @adapter: board private structure
2912 *
2913 * Configure maximum receivable packet size.
2914 **/
2915static void igb_rlpml_set(struct igb_adapter *adapter)
2916{
2917 u32 max_frame_size = adapter->max_frame_size;
2918 struct e1000_hw *hw = &adapter->hw;
2919 u16 pf_id = adapter->vfs_allocated_count;
2920
2921 if (adapter->vlgrp)
2922 max_frame_size += VLAN_TAG_SIZE;
2923
 2924	/* if VFs are enabled, we set RLPML to the largest possible request
 2925	 * size and set the VMOLR RLPML to the size we need */
2926 if (pf_id) {
2927 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002928 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002929 }
2930
2931 wr32(E1000_RLPML, max_frame_size);
2932}
2933
Williams, Mitch A8151d292010-02-10 01:44:24 +00002934static inline void igb_set_vmolr(struct igb_adapter *adapter,
2935 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002936{
2937 struct e1000_hw *hw = &adapter->hw;
2938 u32 vmolr;
2939
2940 /*
2941 * This register exists only on 82576 and newer so if we are older then
2942 * we should exit and do nothing
2943 */
2944 if (hw->mac.type < e1000_82576)
2945 return;
2946
2947 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00002948 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2949 if (aupe)
2950 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2951 else
2952 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002953
2954 /* clear all bits that might not be set */
2955 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2956
Alexander Duycka99955f2009-11-12 18:37:19 +00002957 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002958 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2959 /*
2960 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2961 * multicast packets
2962 */
2963 if (vfn <= adapter->vfs_allocated_count)
2964 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2965
2966 wr32(E1000_VMOLR(vfn), vmolr);
2967}
2968
Alexander Duycke1739522009-02-19 20:39:44 -08002969/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002970 * igb_configure_rx_ring - Configure a receive ring after Reset
2971 * @adapter: board private structure
2972 * @ring: receive ring to be configured
2973 *
2974 * Configure the Rx unit of the MAC after a reset.
2975 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002976void igb_configure_rx_ring(struct igb_adapter *adapter,
2977 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002978{
2979 struct e1000_hw *hw = &adapter->hw;
2980 u64 rdba = ring->dma;
2981 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002982 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002983
2984 /* disable the queue */
2985 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2986 wr32(E1000_RXDCTL(reg_idx),
2987 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2988
2989 /* Set DMA base address registers */
2990 wr32(E1000_RDBAL(reg_idx),
2991 rdba & 0x00000000ffffffffULL);
2992 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2993 wr32(E1000_RDLEN(reg_idx),
2994 ring->count * sizeof(union e1000_adv_rx_desc));
2995
2996 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002997 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2998 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2999 writel(0, ring->head);
3000 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00003001
Alexander Duyck952f72a2009-10-27 15:51:07 +00003002 /* set descriptor configuration */
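	/* note on units (per the shifts below, an assumption from the field
	 * encoding): SRRCTL counts header size in 64-byte units and packet
	 * buffer size in 1 KB units, hence the ALIGN() and shift arithmetic */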
Alexander Duyck4c844852009-10-27 15:52:07 +00003003 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
3004 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00003005 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3006#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3007 srrctl |= IGB_RXBUFFER_16384 >>
3008 E1000_SRRCTL_BSIZEPKT_SHIFT;
3009#else
3010 srrctl |= (PAGE_SIZE / 2) >>
3011 E1000_SRRCTL_BSIZEPKT_SHIFT;
3012#endif
3013 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3014 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00003015 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00003016 E1000_SRRCTL_BSIZEPKT_SHIFT;
3017 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3018 }
Nick Nunley757b77e2010-03-26 11:36:47 +00003019 if (hw->mac.type == e1000_82580)
3020 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00003021 /* Only set Drop Enable if we are supporting multiple queues */
3022 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3023 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00003024
3025 wr32(E1000_SRRCTL(reg_idx), srrctl);
3026
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003027 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00003028 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00003029
Alexander Duyck85b430b2009-10-27 15:50:29 +00003030 /* enable receive descriptor fetching */
3031 rxdctl = rd32(E1000_RXDCTL(reg_idx));
3032 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3033 rxdctl &= 0xFFF00000;
3034 rxdctl |= IGB_RX_PTHRESH;
3035 rxdctl |= IGB_RX_HTHRESH << 8;
3036 rxdctl |= IGB_RX_WTHRESH << 16;
3037 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3038}
3039
3040/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003041 * igb_configure_rx - Configure receive Unit after Reset
3042 * @adapter: board private structure
3043 *
3044 * Configure the Rx unit of the MAC after a reset.
3045 **/
3046static void igb_configure_rx(struct igb_adapter *adapter)
3047{
Hannes Eder91075842009-02-18 19:36:04 -08003048 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003049
Alexander Duyck68d480c2009-10-05 06:33:08 +00003050 /* set UTA to appropriate mode */
3051 igb_set_uta(adapter);
3052
Alexander Duyck26ad9172009-10-05 06:32:49 +00003053 /* set the correct pool for the PF default MAC address in entry 0 */
3054 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3055 adapter->vfs_allocated_count);
3056
Alexander Duyck06cf2662009-10-27 15:53:25 +00003057 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3058 * the Base and Length of the Rx Descriptor Ring */
3059 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003060 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003061}
3062
3063/**
3064 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003065 * @tx_ring: Tx descriptor ring for a specific queue
3066 *
3067 * Free all transmit software resources
3068 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003069void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003070{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003071 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003072
3073 vfree(tx_ring->buffer_info);
3074 tx_ring->buffer_info = NULL;
3075
Alexander Duyck439705e2009-10-27 23:49:20 +00003076 /* if not set, then don't free */
3077 if (!tx_ring->desc)
3078 return;
3079
Alexander Duyck59d71982010-04-27 13:09:25 +00003080 dma_free_coherent(tx_ring->dev, tx_ring->size,
3081 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003082
3083 tx_ring->desc = NULL;
3084}
3085
3086/**
3087 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3088 * @adapter: board private structure
3089 *
3090 * Free all transmit software resources
3091 **/
3092static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3093{
3094 int i;
3095
3096 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003097 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003098}
3099
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003100void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
3101 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003102{
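	/* skb linear data is mapped with dma_map_single() and fragments
	 * with dma_map_page(), so mapped_as_page selects the matching unmap */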
Alexander Duyck6366ad32009-12-02 16:47:18 +00003103 if (buffer_info->dma) {
3104 if (buffer_info->mapped_as_page)
Alexander Duyck59d71982010-04-27 13:09:25 +00003105 dma_unmap_page(tx_ring->dev,
Alexander Duyck6366ad32009-12-02 16:47:18 +00003106 buffer_info->dma,
3107 buffer_info->length,
Alexander Duyck59d71982010-04-27 13:09:25 +00003108 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003109 else
Alexander Duyck59d71982010-04-27 13:09:25 +00003110 dma_unmap_single(tx_ring->dev,
Alexander Duyck6366ad32009-12-02 16:47:18 +00003111 buffer_info->dma,
3112 buffer_info->length,
Alexander Duyck59d71982010-04-27 13:09:25 +00003113 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003114 buffer_info->dma = 0;
3115 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003116 if (buffer_info->skb) {
3117 dev_kfree_skb_any(buffer_info->skb);
3118 buffer_info->skb = NULL;
3119 }
3120 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003121 buffer_info->length = 0;
3122 buffer_info->next_to_watch = 0;
3123 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08003124}
3125
3126/**
3127 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003128 * @tx_ring: ring to be cleaned
3129 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003130static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003131{
3132 struct igb_buffer *buffer_info;
3133 unsigned long size;
3134 unsigned int i;
3135
3136 if (!tx_ring->buffer_info)
3137 return;
3138 /* Free all the Tx ring sk_buffs */
3139
3140 for (i = 0; i < tx_ring->count; i++) {
3141 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003142 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003143 }
3144
3145 size = sizeof(struct igb_buffer) * tx_ring->count;
3146 memset(tx_ring->buffer_info, 0, size);
3147
3148 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003149 memset(tx_ring->desc, 0, tx_ring->size);
3150
3151 tx_ring->next_to_use = 0;
3152 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003153}
3154
3155/**
3156 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3157 * @adapter: board private structure
3158 **/
3159static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3160{
3161 int i;
3162
3163 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003164 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003165}
3166
3167/**
3168 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003169 * @rx_ring: ring to clean the resources from
3170 *
3171 * Free all receive software resources
3172 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003173void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003174{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003175 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003176
3177 vfree(rx_ring->buffer_info);
3178 rx_ring->buffer_info = NULL;
3179
Alexander Duyck439705e2009-10-27 23:49:20 +00003180 /* if not set, then don't free */
3181 if (!rx_ring->desc)
3182 return;
3183
Alexander Duyck59d71982010-04-27 13:09:25 +00003184 dma_free_coherent(rx_ring->dev, rx_ring->size,
3185 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003186
3187 rx_ring->desc = NULL;
3188}
3189
3190/**
3191 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3192 * @adapter: board private structure
3193 *
3194 * Free all receive software resources
3195 **/
3196static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3197{
3198 int i;
3199
3200 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003201 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003202}
3203
3204/**
3205 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003206 * @rx_ring: ring to free buffers from
3207 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003208static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003209{
3210 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003211 unsigned long size;
3212 unsigned int i;
3213
3214 if (!rx_ring->buffer_info)
3215 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003216
Auke Kok9d5c8242008-01-24 02:22:38 -08003217 /* Free all the Rx ring sk_buffs */
3218 for (i = 0; i < rx_ring->count; i++) {
3219 buffer_info = &rx_ring->buffer_info[i];
3220 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003221 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003222 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00003223 rx_ring->rx_buffer_len,
Alexander Duyck59d71982010-04-27 13:09:25 +00003224 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003225 buffer_info->dma = 0;
3226 }
3227
3228 if (buffer_info->skb) {
3229 dev_kfree_skb(buffer_info->skb);
3230 buffer_info->skb = NULL;
3231 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003232 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003233 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003234 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003235 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003236 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003237 buffer_info->page_dma = 0;
3238 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003239 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003240 put_page(buffer_info->page);
3241 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003242 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003243 }
3244 }
3245
Auke Kok9d5c8242008-01-24 02:22:38 -08003246 size = sizeof(struct igb_buffer) * rx_ring->count;
3247 memset(rx_ring->buffer_info, 0, size);
3248
3249 /* Zero out the descriptor ring */
3250 memset(rx_ring->desc, 0, rx_ring->size);
3251
3252 rx_ring->next_to_clean = 0;
3253 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003254}
3255
3256/**
3257 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3258 * @adapter: board private structure
3259 **/
3260static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3261{
3262 int i;
3263
3264 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003265 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003266}
3267
3268/**
3269 * igb_set_mac - Change the Ethernet Address of the NIC
3270 * @netdev: network interface device structure
3271 * @p: pointer to an address structure
3272 *
3273 * Returns 0 on success, negative on failure
3274 **/
3275static int igb_set_mac(struct net_device *netdev, void *p)
3276{
3277 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003278 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003279 struct sockaddr *addr = p;
3280
3281 if (!is_valid_ether_addr(addr->sa_data))
3282 return -EADDRNOTAVAIL;
3283
3284 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003285 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003286
Alexander Duyck26ad9172009-10-05 06:32:49 +00003287 /* set the correct pool for the new PF MAC address in entry 0 */
3288 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3289 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003290
Auke Kok9d5c8242008-01-24 02:22:38 -08003291 return 0;
3292}
3293
3294/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003295 * igb_write_mc_addr_list - write multicast addresses to MTA
3296 * @netdev: network interface device structure
3297 *
3298 * Writes multicast address list to the MTA hash table.
3299 * Returns: -ENOMEM on failure
3300 * 0 on no addresses written
3301 * X on writing X addresses to MTA
3302 **/
3303static int igb_write_mc_addr_list(struct net_device *netdev)
3304{
3305 struct igb_adapter *adapter = netdev_priv(netdev);
3306 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad2010-04-01 21:22:57 +00003307 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003308 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003309 int i;
3310
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003311 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003312 /* nothing to program, so clear mc list */
3313 igb_update_mc_addr_list(hw, NULL, 0);
3314 igb_restore_vf_multicasts(adapter);
3315 return 0;
3316 }
3317
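	/* six bytes (ETH_ALEN) of raw MAC address per multicast entry */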
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003318 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003319 if (!mta_list)
3320 return -ENOMEM;
3321
Alexander Duyck68d480c2009-10-05 06:33:08 +00003322 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003323 i = 0;
Jiri Pirko22bedad2010-04-01 21:22:57 +00003324 netdev_for_each_mc_addr(ha, netdev)
3325 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003326
Alexander Duyck68d480c2009-10-05 06:33:08 +00003327 igb_update_mc_addr_list(hw, mta_list, i);
3328 kfree(mta_list);
3329
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003330 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003331}
3332
3333/**
3334 * igb_write_uc_addr_list - write unicast addresses to RAR table
3335 * @netdev: network interface device structure
3336 *
3337 * Writes unicast address list to the RAR table.
3338 * Returns: -ENOMEM on failure/insufficient address space
3339 * 0 on no addresses written
3340 * X on writing X addresses to the RAR table
3341 **/
3342static int igb_write_uc_addr_list(struct net_device *netdev)
3343{
3344 struct igb_adapter *adapter = netdev_priv(netdev);
3345 struct e1000_hw *hw = &adapter->hw;
3346 unsigned int vfn = adapter->vfs_allocated_count;
3347 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
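	/* entry 0 carries the PF MAC and one RAR entry is reserved per VF;
	 * the remainder of the table is free for extra unicast filters */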
3348 int count = 0;
3349
3350 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003351 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003352 return -ENOMEM;
3353
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003354 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003355 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003356
3357 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003358 if (!rar_entries)
3359 break;
3360 igb_rar_set_qsel(adapter, ha->addr,
3361 rar_entries--,
3362 vfn);
3363 count++;
3364 }
3365 }
3366 /* write the addresses in reverse order to avoid write combining */
3367 for (; rar_entries > 0 ; rar_entries--) {
3368 wr32(E1000_RAH(rar_entries), 0);
3369 wr32(E1000_RAL(rar_entries), 0);
3370 }
3371 wrfl();
3372
3373 return count;
3374}
3375
3376/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003377 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003378 * @netdev: network interface device structure
3379 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003380 * The set_rx_mode entry point is called whenever the unicast or multicast
3381 * address lists or the network interface flags are updated. This routine is
3382 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003383 * promiscuous mode, and all-multi behavior.
3384 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003385static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003386{
3387 struct igb_adapter *adapter = netdev_priv(netdev);
3388 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003389 unsigned int vfn = adapter->vfs_allocated_count;
3390 u32 rctl, vmolr = 0;
3391 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003392
3393 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003394 rctl = rd32(E1000_RCTL);
3395
Alexander Duyck68d480c2009-10-05 06:33:08 +00003396	/* clear the affected bits */
3397 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3398
Patrick McHardy746b9f02008-07-16 20:15:45 -07003399 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003400 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003401 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003402 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003403 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003404 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003405 vmolr |= E1000_VMOLR_MPME;
3406 } else {
3407 /*
 3408			 * Write addresses to the MTA; if the attempt fails
 3409			 * then we should just turn on promiscuous mode so
 3410			 * that we can at least receive multicast traffic
3411 */
3412 count = igb_write_mc_addr_list(netdev);
3413 if (count < 0) {
3414 rctl |= E1000_RCTL_MPE;
3415 vmolr |= E1000_VMOLR_MPME;
3416 } else if (count) {
3417 vmolr |= E1000_VMOLR_ROMPE;
3418 }
3419 }
3420 /*
 3421		 * Write addresses to available RAR registers; if there is not
 3422		 * sufficient space to store all the addresses then enable
 3423		 * unicast promiscuous mode
3424 */
3425 count = igb_write_uc_addr_list(netdev);
3426 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003427 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003428 vmolr |= E1000_VMOLR_ROPE;
3429 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003430 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003431 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003432 wr32(E1000_RCTL, rctl);
3433
Alexander Duyck68d480c2009-10-05 06:33:08 +00003434 /*
3435 * In order to support SR-IOV and eventually VMDq it is necessary to set
3436 * the VMOLR to enable the appropriate modes. Without this workaround
3437 * we will have issues with VLAN tag stripping not being done for frames
3438 * that are only arriving because we are the default pool
3439 */
3440 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003441 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003442
Alexander Duyck68d480c2009-10-05 06:33:08 +00003443 vmolr |= rd32(E1000_VMOLR(vfn)) &
3444 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3445 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003446 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003447}
3448
Greg Rose13800462010-11-06 02:08:26 +00003449static void igb_check_wvbr(struct igb_adapter *adapter)
3450{
3451 struct e1000_hw *hw = &adapter->hw;
3452 u32 wvbr = 0;
3453
3454 switch (hw->mac.type) {
3455 case e1000_82576:
3456 case e1000_i350:
3457 if (!(wvbr = rd32(E1000_WVBR)))
3458 return;
3459 break;
3460 default:
3461 break;
3462 }
3463
3464 adapter->wvbr |= wvbr;
3465}
3466
3467#define IGB_STAGGERED_QUEUE_OFFSET 8
3468
3469static void igb_spoof_check(struct igb_adapter *adapter)
3470{
3471 int j;
3472
3473 if (!adapter->wvbr)
3474 return;
3475
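	/* WVBR reports spoof events per queue: bit j covers VF j's first
	 * queue and bit (j + IGB_STAGGERED_QUEUE_OFFSET) its second */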
 3476	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3477 if (adapter->wvbr & (1 << j) ||
3478 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3479 dev_warn(&adapter->pdev->dev,
3480 "Spoof event(s) detected on VF %d\n", j);
3481 adapter->wvbr &=
3482 ~((1 << j) |
3483 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3484 }
3485 }
3486}
3487
Auke Kok9d5c8242008-01-24 02:22:38 -08003488/* Need to wait a few seconds after link up to get diagnostic information from
3489 * the phy */
3490static void igb_update_phy_info(unsigned long data)
3491{
3492 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003493 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003494}
3495
3496/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003497 * igb_has_link - check shared code for link and determine up/down
3498 * @adapter: pointer to driver private info
3499 **/
Nick Nunley31455352010-02-17 01:01:21 +00003500bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003501{
3502 struct e1000_hw *hw = &adapter->hw;
3503 bool link_active = false;
3504 s32 ret_val = 0;
3505
3506 /* get_link_status is set on LSC (link status) interrupt or
3507 * rx sequence error interrupt. get_link_status will stay
3508 * false until the e1000_check_for_link establishes link
3509 * for copper adapters ONLY
3510 */
3511 switch (hw->phy.media_type) {
3512 case e1000_media_type_copper:
3513 if (hw->mac.get_link_status) {
3514 ret_val = hw->mac.ops.check_for_link(hw);
3515 link_active = !hw->mac.get_link_status;
3516 } else {
3517 link_active = true;
3518 }
3519 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003520 case e1000_media_type_internal_serdes:
3521 ret_val = hw->mac.ops.check_for_link(hw);
3522 link_active = hw->mac.serdes_has_link;
3523 break;
3524 default:
3525 case e1000_media_type_unknown:
3526 break;
3527 }
3528
3529 return link_active;
3530}
3531
3532/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003533 * igb_watchdog - Timer Call-back
3534 * @data: pointer to adapter cast into an unsigned long
3535 **/
3536static void igb_watchdog(unsigned long data)
3537{
3538 struct igb_adapter *adapter = (struct igb_adapter *)data;
3539 /* Do the rest outside of interrupt context */
3540 schedule_work(&adapter->watchdog_task);
3541}
3542
3543static void igb_watchdog_task(struct work_struct *work)
3544{
3545 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003546 struct igb_adapter,
3547 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003548 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003549 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003550 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003551 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003552
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003553 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003554 if (link) {
3555 if (!netif_carrier_ok(netdev)) {
3556 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003557 hw->mac.ops.get_speed_and_duplex(hw,
3558 &adapter->link_speed,
3559 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003560
3561 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003562			/* Link status message must follow this format */
3563 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003564 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003565 netdev->name,
3566 adapter->link_speed,
3567 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003568 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003569 ((ctrl & E1000_CTRL_TFCE) &&
3570 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3571 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3572 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003573
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003574 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003575 adapter->tx_timeout_factor = 1;
3576 switch (adapter->link_speed) {
3577 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003578 adapter->tx_timeout_factor = 14;
3579 break;
3580 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003581 /* maybe add some timeout factor ? */
3582 break;
3583 }
3584
3585 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003586
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003587 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003588 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003589
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003590 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003591 if (!test_bit(__IGB_DOWN, &adapter->state))
3592 mod_timer(&adapter->phy_info_timer,
3593 round_jiffies(jiffies + 2 * HZ));
3594 }
3595 } else {
3596 if (netif_carrier_ok(netdev)) {
3597 adapter->link_speed = 0;
3598 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003599			/* Link status message must follow this format */
3600 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3601 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003602 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003603
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003604 igb_ping_all_vfs(adapter);
3605
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003606 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003607 if (!test_bit(__IGB_DOWN, &adapter->state))
3608 mod_timer(&adapter->phy_info_timer,
3609 round_jiffies(jiffies + 2 * HZ));
3610 }
3611 }
3612
Eric Dumazet12dcd862010-10-15 17:27:10 +00003613 spin_lock(&adapter->stats64_lock);
3614 igb_update_stats(adapter, &adapter->stats64);
3615 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003616
Alexander Duyckdbabb062009-11-12 18:38:16 +00003617 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003618 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003619 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003620 /* We've lost link, so the controller stops DMA,
3621 * but we've got queued Tx work that's never going
3622 * to get done, so reset controller to flush Tx.
3623 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003624 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3625 adapter->tx_timeout_count++;
3626 schedule_work(&adapter->reset_task);
3627 /* return immediately since reset is imminent */
3628 return;
3629 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003630 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003631
Alexander Duyckdbabb062009-11-12 18:38:16 +00003632 /* Force detection of hung controller every watchdog period */
3633 tx_ring->detect_tx_hung = true;
3634 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003635
Auke Kok9d5c8242008-01-24 02:22:38 -08003636 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003637 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003638 u32 eics = 0;
3639 for (i = 0; i < adapter->num_q_vectors; i++) {
3640 struct igb_q_vector *q_vector = adapter->q_vector[i];
3641 eics |= q_vector->eims_value;
3642 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003643 wr32(E1000_EICS, eics);
3644 } else {
3645 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3646 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003647
Greg Rose13800462010-11-06 02:08:26 +00003648 igb_spoof_check(adapter);
3649
Auke Kok9d5c8242008-01-24 02:22:38 -08003650 /* Reset the timer */
3651 if (!test_bit(__IGB_DOWN, &adapter->state))
3652 mod_timer(&adapter->watchdog_timer,
3653 round_jiffies(jiffies + 2 * HZ));
3654}
3655
3656enum latency_range {
3657 lowest_latency = 0,
3658 low_latency = 1,
3659 bulk_latency = 2,
3660 latency_invalid = 255
3661};
3662
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003663/**
3664 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3665 *
 3666 * Stores a new ITR value based strictly on packet size. This
3667 * algorithm is less sophisticated than that used in igb_update_itr,
3668 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003669 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003670 * were determined based on theoretical maximum wire speed and testing
3671 * data, in order to minimize response time while increasing bulk
3672 * throughput.
3673 * This functionality is controlled by the InterruptThrottleRate module
3674 * parameter (see igb_param.c)
3675 * NOTE: This function is called only when operating in a multiqueue
3676 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003677 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003678 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003679static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003680{
Alexander Duyck047e0032009-10-27 15:49:27 +00003681 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003682 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003683 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003684 struct igb_ring *ring;
3685 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003686
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003687 /* For non-gigabit speeds, just fix the interrupt rate at 4000
 3688	 * ints/sec - an itr_val of 976 ticks (~250 usec), matching the code below.
3689 */
3690 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003691 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003692 goto set_itr_val;
3693 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003694
Eric Dumazet12dcd862010-10-15 17:27:10 +00003695 ring = q_vector->rx_ring;
3696 if (ring) {
3697 packets = ACCESS_ONCE(ring->total_packets);
3698
3699 if (packets)
3700 avg_wire_size = ring->total_bytes / packets;
Alexander Duyck047e0032009-10-27 15:49:27 +00003701 }
3702
Eric Dumazet12dcd862010-10-15 17:27:10 +00003703 ring = q_vector->tx_ring;
3704 if (ring) {
3705 packets = ACCESS_ONCE(ring->total_packets);
3706
3707 if (packets)
3708 avg_wire_size = max_t(u32, avg_wire_size,
3709 ring->total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003710 }
3711
3712 /* if avg_wire_size isn't set no work was done */
3713 if (!avg_wire_size)
3714 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003715
3716 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3717 avg_wire_size += 24;
3718
3719 /* Don't starve jumbo frames */
3720 avg_wire_size = min(avg_wire_size, 3000);
3721
3722 /* Give a little boost to mid-size frames */
3723 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3724 new_val = avg_wire_size / 3;
3725 else
3726 new_val = avg_wire_size / 2;
3727
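	/* worked example: a 600-byte average gives 600 / 3 = 200 ticks
	 * (~20K ints/sec), while full-size 1500-byte frames give 750 ticks
	 * (~5K ints/sec) */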
Nick Nunleyabe1c362010-02-17 01:03:19 +00003728 /* when in itr mode 3 do not exceed 20K ints/sec */
3729 if (adapter->rx_itr_setting == 3 && new_val < 196)
3730 new_val = 196;
3731
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003732set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003733 if (new_val != q_vector->itr_val) {
3734 q_vector->itr_val = new_val;
3735 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003736 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003737clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003738 if (q_vector->rx_ring) {
3739 q_vector->rx_ring->total_bytes = 0;
3740 q_vector->rx_ring->total_packets = 0;
3741 }
3742 if (q_vector->tx_ring) {
3743 q_vector->tx_ring->total_bytes = 0;
3744 q_vector->tx_ring->total_packets = 0;
3745 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003746}
3747
3748/**
3749 * igb_update_itr - update the dynamic ITR value based on statistics
3750 * Stores a new ITR value based on packets and byte
3751 * counts during the last interrupt. The advantage of per interrupt
3752 * computation is faster updates and more accurate ITR for the current
3753 * traffic pattern. Constants in this function were computed
3754 * based on theoretical maximum wire speed and thresholds were set based
3755 * on testing data as well as attempting to minimize response time
3756 * while increasing bulk throughput.
3757 * this functionality is controlled by the InterruptThrottleRate module
3758 * parameter (see igb_param.c)
3759 * NOTE: These calculations are only valid when operating in a single-
3760 * queue environment.
3761 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003762 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003763 * @packets: the number of packets during this measurement interval
3764 * @bytes: the number of bytes during this measurement interval
3765 **/
3766static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3767 int packets, int bytes)
3768{
3769 unsigned int retval = itr_setting;
3770
3771 if (packets == 0)
3772 goto update_itr_done;
3773
3774 switch (itr_setting) {
3775 case lowest_latency:
3776 /* handle TSO and jumbo frames */
3777 if (bytes/packets > 8000)
3778 retval = bulk_latency;
3779 else if ((packets < 5) && (bytes > 512))
3780 retval = low_latency;
3781 break;
3782 case low_latency: /* 50 usec aka 20000 ints/s */
3783 if (bytes > 10000) {
3784 /* this if handles the TSO accounting */
3785 if (bytes/packets > 8000) {
3786 retval = bulk_latency;
3787 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3788 retval = bulk_latency;
 3789			} else if (packets > 35) {
3790 retval = lowest_latency;
3791 }
3792 } else if (bytes/packets > 2000) {
3793 retval = bulk_latency;
3794 } else if (packets <= 2 && bytes < 512) {
3795 retval = lowest_latency;
3796 }
3797 break;
3798 case bulk_latency: /* 250 usec aka 4000 ints/s */
3799 if (bytes > 25000) {
3800 if (packets > 35)
3801 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003802 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003803 retval = low_latency;
3804 }
3805 break;
3806 }
3807
3808update_itr_done:
3809 return retval;
3810}
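/* worked example: from low_latency, an interval of 8 packets totalling
 * 12000 bytes has bytes > 10000 and packets < 10, so the state moves to
 * bulk_latency */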
3811
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003812static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003813{
Alexander Duyck047e0032009-10-27 15:49:27 +00003814 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003815 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003816 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003817
3818 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3819 if (adapter->link_speed != SPEED_1000) {
3820 current_itr = 0;
3821 new_itr = 4000;
3822 goto set_itr_now;
3823 }
3824
3825 adapter->rx_itr = igb_update_itr(adapter,
3826 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003827 q_vector->rx_ring->total_packets,
3828 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003829
Alexander Duyck047e0032009-10-27 15:49:27 +00003830 adapter->tx_itr = igb_update_itr(adapter,
3831 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003832 q_vector->tx_ring->total_packets,
3833 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003834 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003835
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003836 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003837 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003838 current_itr = low_latency;
3839
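	/* the constants below imply one itr_val tick is ~256 ns: 980 ~= 250 us
	 * (4,000 ints/sec), 196 ~= 50 us (20,000) and 56 ~= 14 us (70,000) */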
Auke Kok9d5c8242008-01-24 02:22:38 -08003840 switch (current_itr) {
3841 /* counts and packets in update_itr are dependent on these numbers */
3842 case lowest_latency:
Alexander Duyck78b1f602009-04-23 11:20:29 +00003843 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003844 break;
3845 case low_latency:
Alexander Duyck78b1f602009-04-23 11:20:29 +00003846 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003847 break;
3848 case bulk_latency:
Alexander Duyck78b1f602009-04-23 11:20:29 +00003849 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003850 break;
3851 default:
3852 break;
3853 }
3854
3855set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003856 q_vector->rx_ring->total_bytes = 0;
3857 q_vector->rx_ring->total_packets = 0;
3858 q_vector->tx_ring->total_bytes = 0;
3859 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003860
Alexander Duyck047e0032009-10-27 15:49:27 +00003861 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003862 /* this attempts to bias the interrupt rate towards Bulk
3863 * by adding intermediate steps when interrupt rate is
3864 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003865 new_itr = new_itr > q_vector->itr_val ?
3866 max((new_itr * q_vector->itr_val) /
3867 (new_itr + (q_vector->itr_val >> 2)),
3868 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003869 new_itr;
3870 /* Don't write the value here; it resets the adapter's
3871 * internal timer, and causes us to delay far longer than
3872 * we should between interrupts. Instead, we write the ITR
3873 * value at the beginning of the next interrupt so the timing
3874 * ends up being correct.
3875 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003876 q_vector->itr_val = new_itr;
3877 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003878 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003879}
3880
Auke Kok9d5c8242008-01-24 02:22:38 -08003881#define IGB_TX_FLAGS_CSUM 0x00000001
3882#define IGB_TX_FLAGS_VLAN 0x00000002
3883#define IGB_TX_FLAGS_TSO 0x00000004
3884#define IGB_TX_FLAGS_IPV4 0x00000008
Alexander Duyckcdfd01f2009-10-27 23:50:57 +00003885#define IGB_TX_FLAGS_TSTAMP 0x00000010
3886#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3887#define IGB_TX_FLAGS_VLAN_SHIFT 16
Auke Kok9d5c8242008-01-24 02:22:38 -08003888
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003889static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003890 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3891{
3892 struct e1000_adv_tx_context_desc *context_desc;
3893 unsigned int i;
3894 int err;
3895 struct igb_buffer *buffer_info;
3896 u32 info = 0, tu_cmd = 0;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003897 u32 mss_l4len_idx;
3898 u8 l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08003899
3900 if (skb_header_cloned(skb)) {
3901 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3902 if (err)
3903 return err;
3904 }
3905
3906 l4len = tcp_hdrlen(skb);
3907 *hdr_len += l4len;
3908
3909 if (skb->protocol == htons(ETH_P_IP)) {
3910 struct iphdr *iph = ip_hdr(skb);
3911 iph->tot_len = 0;
3912 iph->check = 0;
3913 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3914 iph->daddr, 0,
3915 IPPROTO_TCP,
3916 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003917 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003918 ipv6_hdr(skb)->payload_len = 0;
3919 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3920 &ipv6_hdr(skb)->daddr,
3921 0, IPPROTO_TCP, 0);
3922 }
3923
3924 i = tx_ring->next_to_use;
3925
3926 buffer_info = &tx_ring->buffer_info[i];
3927 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3928 /* VLAN MACLEN IPLEN */
3929 if (tx_flags & IGB_TX_FLAGS_VLAN)
3930 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3931 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3932 *hdr_len += skb_network_offset(skb);
3933 info |= skb_network_header_len(skb);
3934 *hdr_len += skb_network_header_len(skb);
3935 context_desc->vlan_macip_lens = cpu_to_le32(info);
3936
3937 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3938 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3939
3940 if (skb->protocol == htons(ETH_P_IP))
3941 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3942 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3943
3944 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3945
3946 /* MSS L4LEN IDX */
3947 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3948 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3949
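	/* the hardware slices the payload into gso_size-byte (MSS) segments
	 * and replicates the l4len-byte TCP header onto each one */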
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003950 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003951 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3952 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003953
3954 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3955 context_desc->seqnum_seed = 0;
3956
3957 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003958 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003959 buffer_info->dma = 0;
3960 i++;
3961 if (i == tx_ring->count)
3962 i = 0;
3963
3964 tx_ring->next_to_use = i;
3965
3966 return true;
3967}

static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct device *dev = tx_ring->dev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(dev,
						 "partial checksum but proto=%x!\n",
						 skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
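/*
 * What these limits mean in practice: with IGB_MAX_TXD_PWR = 16, one data
 * descriptor can carry at most 1 << 16 = 65536 bytes.  igb_tx_map_adv()
 * below BUG()s if the linear head or any page fragment reaches
 * IGB_MAX_DATA_PER_TXD, so each chunk must fit in a single descriptor.
 */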

static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
	unsigned int hlen = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = hlen;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
		unsigned int len = frag->size;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = dma_map_page(dev,
						frag->page,
						frag->page_offset,
						len,
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
	/* multiply data chunks by size of headers */
	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
	tx_ring->buffer_info[i].gso_segs = gso_segs;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i = tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}
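/*
 * Worked example for the bytecount math above (illustrative numbers): a
 * TSO skb with skb->len = 60066, hlen = 66 and gso_segs = 42 is charged
 *
 *	bytecount = (42 - 1) * 66 + 60066 = 62772
 *
 * i.e. the header bytes are counted once per segment the hardware will
 * emit, so the byte statistics reflect what actually hits the wire.
 */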

static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    u32 tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time; it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
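/*
 * Note on the PAYLEN field written above: olinfo_status carries
 * (paylen - hdr_len), the payload excluding protocol headers, because for
 * TSO the hardware regenerates the headers for every segment itself.  For
 * the example skb used earlier (60066 bytes total, 66 of header) PAYLEN
 * would be 60000.
 */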

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
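/*
 * The stop/wake pairing above is the usual lock-free ring flow control
 * pattern: stop the queue first, let smp_mb() order the stop against the
 * recheck of free descriptors, and wake the queue right back up if the
 * cleanup path made room in the meantime.  igb_maybe_stop_tx() keeps the
 * common case cheap by only taking the slow path when space looks tight.
 */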

netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	int tso = 0, count;
	u32 tx_flags = 0;
	u16 first;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if 0 or less then a mapping
	 * error has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
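/*
 * Transmit path summary: reserve worst-case ring space (nr_frags + 4
 * descriptors), optionally build a context descriptor for TSO/checksum
 * offload, DMA-map the head and fragments, then write the data
 * descriptors and bump the tail.  On a mapping failure next_to_use is
 * rewound to 'first' so the context descriptor slot is reused by the
 * next packet.
 */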

static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
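/*
 * The r_idx computation above relies on IGB_ABS_MAX_TX_QUEUES being a
 * power of two, so the AND acts as a cheap modulo.  With a hypothetical
 * value of 8, queue_mapping 0..7 maps straight through and 8 wraps back
 * to ring 0.
 */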

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer to be filled in
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (adapter->hw.mac.type == e1000_82580)
		max_frame += IGB_TS_HDR_LEN;

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;

	if ((adapter->hw.mac.type == e1000_82580) &&
	    (rx_buffer_len == IGB_RXBUFFER_128))
		rx_buffer_len += IGB_RXBUFFER_64;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
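/*
 * Buffer sizing example for the logic above, assuming the conventional
 * constants IGB_RXBUFFER_1024 = 1024 and MAXIMUM_ETHERNET_VLAN_SIZE =
 * 1522: a standard MTU of 1500 gives max_frame = 1500 + 14 + 4 = 1518,
 * which fails the first test and selects a 1522-byte buffer.  Larger
 * (jumbo) frames fall back to small IGB_RXBUFFER_128 header buffers,
 * with the rest of the packet landing in page buffers.
 */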

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];

		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
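	/*
	 * The fetch_begin/fetch_retry pair above is the u64_stats seqcount
	 * read pattern: on 32-bit hosts a 64-bit counter cannot be read
	 * atomically, so the reader snapshots bytes/packets and retries if
	 * the writer bumped the sequence mid-read.  On 64-bit builds it
	 * compiles down to plain loads.
	 */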

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also an indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
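/*
 * Register packing note for igb_write_itr(): masking with 0x7FFC keeps
 * only the interval bits the EITR register accepts, and on 82575 the same
 * value is mirrored into the upper 16 bits to match that part's register
 * layout.  The 0x8000000 constant used for later parts is a hardware
 * control bit carried over from the original code.
 */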

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}
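/*
 * Why the 30-entry threshold above: the driver stores at most 30 multicast
 * hash values per VF (see igb_set_vf_multicasts() below).  A VF that
 * requested more than that cannot be exactly filtered, so it is switched
 * to multicast promiscuous mode via VMOLR.MPME instead of programming
 * individual MTA entries.
 */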

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* read the VLAN id *before* clearing the entry,
			 * otherwise VLAN 0 is removed from the vfta
			 * instead of the id actually stored here */
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}
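/*
 * Shape of a VLVF entry as manipulated above: the low bits hold the VLAN
 * id, E1000_VLVF_VLANID_ENABLE marks the entry valid, and one pool-select
 * bit per pool (starting at E1000_VLVF_POOLSEL_SHIFT) records which PF/VF
 * pools are members.  The RLPML adjustment grows or shrinks a VF's
 * maximum receive length by 4 bytes as its first VLAN is added or its
 * last one removed, leaving room for the tag.
 */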

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
5188
5189static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5190{
5191 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005192 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005193 u32 msg = E1000_VT_MSGTYPE_NACK;
5194
5195 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005196 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5197 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005198 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005199 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005200 }
5201}
5202
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005203static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005204{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005205 struct pci_dev *pdev = adapter->pdev;
5206 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005207 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005208 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005209 s32 retval;
5210
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005211 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005212
Alexander Duyckfef45f42009-12-11 22:57:34 -08005213 if (retval) {
5214 /* if receive failed revoke VF CTS stats and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005215 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005216 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5217 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5218 return;
5219 goto out;
5220 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005221
5222 /* this is a message we already processed, do nothing */
5223 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005224 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005225
5226 /*
5227 * until the vf completes a reset it should not be
5228 * allowed to start any configuration.
5229 */
5230
5231 if (msgbuf[0] == E1000_VF_RESET) {
5232 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005233 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005234 }
5235
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005236 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005237 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5238 return;
5239 retval = -1;
5240 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005241 }
5242
5243 switch ((msgbuf[0] & 0xFFFF)) {
5244 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005245 retval = -EINVAL;
5246 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5247 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5248 else
5249 dev_warn(&pdev->dev,
5250 "VF %d attempted to override administratively "
5251 "set MAC address\nReload the VF driver to "
5252 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005253 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005254 case E1000_VF_SET_PROMISC:
5255 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5256 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005257 case E1000_VF_SET_MULTICAST:
5258 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5259 break;
5260 case E1000_VF_SET_LPE:
5261 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5262 break;
5263 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005264 retval = -1;
5265 if (vf_data->pf_vlan)
5266 dev_warn(&pdev->dev,
5267 "VF %d attempted to override administratively "
5268 "set VLAN tag\nReload the VF driver to "
5269 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005270 else
5271 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005272 break;
5273 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005274 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005275 retval = -1;
5276 break;
5277 }
5278
Alexander Duyckfef45f42009-12-11 22:57:34 -08005279 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5280out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005281 /* notify the VF of the results of what it sent us */
5282 if (retval)
5283 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5284 else
5285 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5286
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005287 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005288}
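/*
 * Mailbox message layout used above (a sketch, inferred from the handlers):
 * msgbuf[0] bits 15:0 carry the command (E1000_VF_SET_*), while the upper
 * bits carry handshake flags (E1000_VT_MSGTYPE_CTS/ACK/NACK) that the PF
 * ORs in before writing the reply back with igb_write_mbx().
 */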
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005289
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005290static void igb_msg_task(struct igb_adapter *adapter)
5291{
5292 struct e1000_hw *hw = &adapter->hw;
5293 u32 vf;
5294
5295 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5296 /* process any reset requests */
5297 if (!igb_check_for_rst(hw, vf))
5298 igb_vf_reset_event(adapter, vf);
5299
5300 /* process any messages pending */
5301 if (!igb_check_for_msg(hw, vf))
5302 igb_rcv_msg_from_vf(adapter, vf);
5303
5304 /* process any acks */
5305 if (!igb_check_for_ack(hw, vf))
5306 igb_rcv_ack_from_vf(adapter, vf);
5307 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005308}
5309
Auke Kok9d5c8242008-01-24 02:22:38 -08005310/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00005311 * igb_set_uta - Set unicast filter table address
5312 * @adapter: board private structure
5313 *
5314 * The unicast table address is a register array of 32-bit registers.
 5315 * The table is meant to be used in a way similar to how the MTA is used;
 5316 * however, due to certain limitations in the hardware it is necessary to
 5317 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 5318 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
5319 **/
5320static void igb_set_uta(struct igb_adapter *adapter)
5321{
5322 struct e1000_hw *hw = &adapter->hw;
5323 int i;
5324
5325 /* The UTA table only exists on 82576 hardware and newer */
5326 if (hw->mac.type < e1000_82576)
5327 return;
5328
5329 /* we only need to do this if VMDq is enabled */
5330 if (!adapter->vfs_allocated_count)
5331 return;
5332
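	/*
	 * Note: writing ~0 below sets every bit of each UTA register, so all
	 * unicast hash values "match"; actual acceptance is then governed per
	 * pool by the VMOLR ROPE bit described above.
	 */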
5333 for (i = 0; i < hw->mac.uta_reg_count; i++)
5334 array_wr32(E1000_UTA, i, ~0);
5335}
5336
5337/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005338 * igb_intr_msi - Interrupt Handler
5339 * @irq: interrupt number
5340 * @data: pointer to a network interface device structure
5341 **/
5342static irqreturn_t igb_intr_msi(int irq, void *data)
5343{
Alexander Duyck047e0032009-10-27 15:49:27 +00005344 struct igb_adapter *adapter = data;
5345 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005346 struct e1000_hw *hw = &adapter->hw;
5347 /* read ICR disables interrupts using IAM */
5348 u32 icr = rd32(E1000_ICR);
5349
Alexander Duyck047e0032009-10-27 15:49:27 +00005350 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005351
Alexander Duyck7f081d42010-01-07 17:41:00 +00005352 if (icr & E1000_ICR_DRSTA)
5353 schedule_work(&adapter->reset_task);
5354
Alexander Duyck047e0032009-10-27 15:49:27 +00005355 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005356 /* HW is reporting DMA is out of sync */
5357 adapter->stats.doosync++;
5358 }
5359
Auke Kok9d5c8242008-01-24 02:22:38 -08005360 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5361 hw->mac.get_link_status = 1;
5362 if (!test_bit(__IGB_DOWN, &adapter->state))
5363 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5364 }
5365
Alexander Duyck047e0032009-10-27 15:49:27 +00005366 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005367
5368 return IRQ_HANDLED;
5369}
5370
5371/**
Alexander Duyck4a3c6432009-02-06 23:20:49 +00005372 * igb_intr - Legacy Interrupt Handler
Auke Kok9d5c8242008-01-24 02:22:38 -08005373 * @irq: interrupt number
5374 * @data: pointer to a network interface device structure
5375 **/
5376static irqreturn_t igb_intr(int irq, void *data)
5377{
Alexander Duyck047e0032009-10-27 15:49:27 +00005378 struct igb_adapter *adapter = data;
5379 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08005380 struct e1000_hw *hw = &adapter->hw;
5381 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5382 * need for the IMC write */
5383 u32 icr = rd32(E1000_ICR);
Auke Kok9d5c8242008-01-24 02:22:38 -08005384 if (!icr)
5385 return IRQ_NONE; /* Not our interrupt */
5386
Alexander Duyck047e0032009-10-27 15:49:27 +00005387 igb_write_itr(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005388
5389 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5390 * not set, then the adapter didn't send an interrupt */
5391 if (!(icr & E1000_ICR_INT_ASSERTED))
5392 return IRQ_NONE;
5393
Alexander Duyck7f081d42010-01-07 17:41:00 +00005394 if (icr & E1000_ICR_DRSTA)
5395 schedule_work(&adapter->reset_task);
5396
Alexander Duyck047e0032009-10-27 15:49:27 +00005397 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00005398 /* HW is reporting DMA is out of sync */
5399 adapter->stats.doosync++;
5400 }
5401
Auke Kok9d5c8242008-01-24 02:22:38 -08005402 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5403 hw->mac.get_link_status = 1;
5404 /* guard against interrupt when we're going down */
5405 if (!test_bit(__IGB_DOWN, &adapter->state))
5406 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5407 }
5408
Alexander Duyck047e0032009-10-27 15:49:27 +00005409 napi_schedule(&q_vector->napi);
Auke Kok9d5c8242008-01-24 02:22:38 -08005410
5411 return IRQ_HANDLED;
5412}
5413
Alexander Duyck047e0032009-10-27 15:49:27 +00005414static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
Alexander Duyck46544252009-02-19 20:39:04 -08005415{
Alexander Duyck047e0032009-10-27 15:49:27 +00005416 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck46544252009-02-19 20:39:04 -08005417 struct e1000_hw *hw = &adapter->hw;
5418
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00005419 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
5420 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
Alexander Duyck047e0032009-10-27 15:49:27 +00005421 if (!adapter->msix_entries)
Alexander Duyck46544252009-02-19 20:39:04 -08005422 igb_set_itr(adapter);
5423 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005424 igb_update_ring_itr(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005425 }
5426
5427 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5428 if (adapter->msix_entries)
Alexander Duyck047e0032009-10-27 15:49:27 +00005429 wr32(E1000_EIMS, q_vector->eims_value);
Alexander Duyck46544252009-02-19 20:39:04 -08005430 else
5431 igb_irq_enable(adapter);
5432 }
5433}
5434
Auke Kok9d5c8242008-01-24 02:22:38 -08005435/**
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005436 * igb_poll - NAPI Rx polling callback
5437 * @napi: napi polling structure
5438 * @budget: count of how many packets we should handle
Auke Kok9d5c8242008-01-24 02:22:38 -08005439 **/
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005440static int igb_poll(struct napi_struct *napi, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005441{
Alexander Duyck047e0032009-10-27 15:49:27 +00005442 struct igb_q_vector *q_vector = container_of(napi,
5443 struct igb_q_vector,
5444 napi);
5445 int tx_clean_complete = 1, work_done = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005446
Jeff Kirsher421e02f2008-10-17 11:08:31 -07005447#ifdef CONFIG_IGB_DCA
Alexander Duyck047e0032009-10-27 15:49:27 +00005448 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5449 igb_update_dca(q_vector);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07005450#endif
Alexander Duyck047e0032009-10-27 15:49:27 +00005451 if (q_vector->tx_ring)
5452 tx_clean_complete = igb_clean_tx_irq(q_vector);
Auke Kok9d5c8242008-01-24 02:22:38 -08005453
Alexander Duyck047e0032009-10-27 15:49:27 +00005454 if (q_vector->rx_ring)
5455 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
5456
5457 if (!tx_clean_complete)
5458 work_done = budget;
Auke Kok9d5c8242008-01-24 02:22:38 -08005459
Alexander Duyck46544252009-02-19 20:39:04 -08005460 /* If not enough Rx work done, exit the polling mode */
Alexander Duyck5e6d5b12009-03-13 20:40:38 +00005461 if (work_done < budget) {
Alexander Duyck46544252009-02-19 20:39:04 -08005462 napi_complete(napi);
Alexander Duyck047e0032009-10-27 15:49:27 +00005463 igb_ring_irq_enable(q_vector);
Alexander Duyck46544252009-02-19 20:39:04 -08005464 }
5465
5466 return work_done;
Auke Kok9d5c8242008-01-24 02:22:38 -08005467}
Al Viro6d8126f2008-03-16 22:23:24 +00005468
Auke Kok9d5c8242008-01-24 02:22:38 -08005469/**
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005470 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005471 * @adapter: board private structure
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005472 * @shhwtstamps: timestamp structure to update
5473 * @regval: unsigned 64bit system time value.
5474 *
5475 * We need to convert the system time value stored in the RX/TXSTMP registers
5476 * into a hwtstamp which can be used by the upper level timestamping functions
5477 */
5478static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5479 struct skb_shared_hwtstamps *shhwtstamps,
5480 u64 regval)
5481{
5482 u64 ns;
5483
Alexander Duyck55cac242009-11-19 12:42:21 +00005484 /*
5485 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5486 * 24 to match clock shift we setup earlier.
5487 */
5488 if (adapter->hw.mac.type == e1000_82580)
5489 regval <<= IGB_82580_TSYNC_SHIFT;
5490
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005491 ns = timecounter_cyc2time(&adapter->clock, regval);
5492 timecompare_update(&adapter->compare, ns);
5493 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5494 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5495 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5496}
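/*
 * Worked example (illustrative only): the 82580 keeps 1 ns per bit in
 * RX/TXSTMPL, so with IGB_82580_TSYNC_SHIFT == 24 a raw register value of
 * 1000 becomes 1000 << 24 cycles, which timecounter_cyc2time() then
 * converts back to nanoseconds using the matching clock shift chosen at
 * init time.
 */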
5497
5498/**
5499 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5500 * @q_vector: pointer to q_vector containing needed info
Nick Nunley28739572010-05-04 21:58:07 +00005501 * @buffer: pointer to igb_buffer structure
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005502 *
5503 * If we were asked to do hardware stamping and such a time stamp is
 5504 * available, then it must have been for this skb here because we
 5505 * allow only one such packet into the queue.
5506 */
Nick Nunley28739572010-05-04 21:58:07 +00005507static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005508{
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005509 struct igb_adapter *adapter = q_vector->adapter;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005510 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005511 struct skb_shared_hwtstamps shhwtstamps;
5512 u64 regval;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005513
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005514 /* if skb does not support hw timestamp or TX stamp not valid exit */
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005515 if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005516 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5517 return;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005518
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005519 regval = rd32(E1000_TXSTMPL);
5520 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5521
5522 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
Nick Nunley28739572010-05-04 21:58:07 +00005523 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00005524}
5525
5526/**
Auke Kok9d5c8242008-01-24 02:22:38 -08005527 * igb_clean_tx_irq - Reclaim resources after transmit completes
Alexander Duyck047e0032009-10-27 15:49:27 +00005528 * @q_vector: pointer to q_vector containing needed info
Auke Kok9d5c8242008-01-24 02:22:38 -08005529 * returns true if ring is completely cleaned
5530 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005531static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08005532{
Alexander Duyck047e0032009-10-27 15:49:27 +00005533 struct igb_adapter *adapter = q_vector->adapter;
5534 struct igb_ring *tx_ring = q_vector->tx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00005535 struct net_device *netdev = tx_ring->netdev;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005536 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08005537 struct igb_buffer *buffer_info;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005538 union e1000_adv_tx_desc *tx_desc, *eop_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08005539 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005540 unsigned int i, eop, count = 0;
5541 bool cleaned = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08005542
Auke Kok9d5c8242008-01-24 02:22:38 -08005543 i = tx_ring->next_to_clean;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005544 eop = tx_ring->buffer_info[i].next_to_watch;
5545 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5546
5547 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
5548 (count < tx_ring->count)) {
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00005549 rmb(); /* read buffer_info after eop_desc status */
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005550 for (cleaned = false; !cleaned; count++) {
5551 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
Auke Kok9d5c8242008-01-24 02:22:38 -08005552 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005553 cleaned = (i == eop);
Auke Kok9d5c8242008-01-24 02:22:38 -08005554
Nick Nunley28739572010-05-04 21:58:07 +00005555 if (buffer_info->skb) {
5556 total_bytes += buffer_info->bytecount;
Auke Kok9d5c8242008-01-24 02:22:38 -08005557 /* gso_segs is currently only valid for tcp */
Nick Nunley28739572010-05-04 21:58:07 +00005558 total_packets += buffer_info->gso_segs;
5559 igb_tx_hwtstamp(q_vector, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08005560 }
5561
Alexander Duyck80785292009-10-27 15:51:47 +00005562 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005563 tx_desc->wb.status = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005564
5565 i++;
5566 if (i == tx_ring->count)
5567 i = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08005568 }
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005569 eop = tx_ring->buffer_info[i].next_to_watch;
5570 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5571 }
5572
Auke Kok9d5c8242008-01-24 02:22:38 -08005573 tx_ring->next_to_clean = i;
5574
Alexander Duyckfc7d3452008-08-26 04:25:08 -07005575 if (unlikely(count &&
Auke Kok9d5c8242008-01-24 02:22:38 -08005576 netif_carrier_ok(netdev) &&
Alexander Duyckc493ea42009-03-20 00:16:50 +00005577 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005578 /* Make sure that anybody stopping the queue after this
5579 * sees the new next_to_clean.
5580 */
5581 smp_mb();
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005582 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5583 !(test_bit(__IGB_DOWN, &adapter->state))) {
5584 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00005585
5586 u64_stats_update_begin(&tx_ring->tx_syncp);
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005587 tx_ring->tx_stats.restart_queue++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005588 u64_stats_update_end(&tx_ring->tx_syncp);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005589 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005590 }
5591
5592 if (tx_ring->detect_tx_hung) {
 5593 /* Detect a transmit hang in hardware; this serializes the
5594 * check with the clearing of time_stamp and movement of i */
5595 tx_ring->detect_tx_hung = false;
5596 if (tx_ring->buffer_info[i].time_stamp &&
5597 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00005598 (adapter->tx_timeout_factor * HZ)) &&
5599 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005600
Auke Kok9d5c8242008-01-24 02:22:38 -08005601 /* detected Tx unit hang */
Alexander Duyck59d71982010-04-27 13:09:25 +00005602 dev_err(tx_ring->dev,
Auke Kok9d5c8242008-01-24 02:22:38 -08005603 "Detected Tx Unit Hang\n"
Alexander Duyck2d064c02008-07-08 15:10:12 -07005604 " Tx Queue <%d>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005605 " TDH <%x>\n"
5606 " TDT <%x>\n"
5607 " next_to_use <%x>\n"
5608 " next_to_clean <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005609 "buffer_info[next_to_clean]\n"
5610 " time_stamp <%lx>\n"
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005611 " next_to_watch <%x>\n"
Auke Kok9d5c8242008-01-24 02:22:38 -08005612 " jiffies <%lx>\n"
5613 " desc.status <%x>\n",
Alexander Duyck2d064c02008-07-08 15:10:12 -07005614 tx_ring->queue_index,
Alexander Duyckfce99e32009-10-27 15:51:27 +00005615 readl(tx_ring->head),
5616 readl(tx_ring->tail),
Auke Kok9d5c8242008-01-24 02:22:38 -08005617 tx_ring->next_to_use,
5618 tx_ring->next_to_clean,
Alexander Duyckf7ba2052009-10-27 23:48:51 +00005619 tx_ring->buffer_info[eop].time_stamp,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005620 eop,
Auke Kok9d5c8242008-01-24 02:22:38 -08005621 jiffies,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08005622 eop_desc->wb.status);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07005623 netif_stop_subqueue(netdev, tx_ring->queue_index);
Auke Kok9d5c8242008-01-24 02:22:38 -08005624 }
5625 }
5626 tx_ring->total_bytes += total_bytes;
5627 tx_ring->total_packets += total_packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005628 u64_stats_update_begin(&tx_ring->tx_syncp);
Alexander Duycke21ed352008-07-08 15:07:24 -07005629 tx_ring->tx_stats.bytes += total_bytes;
5630 tx_ring->tx_stats.packets += total_packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005631 u64_stats_update_end(&tx_ring->tx_syncp);
Eric Dumazet807540b2010-09-23 05:40:09 +00005632 return count < tx_ring->count;
Auke Kok9d5c8242008-01-24 02:22:38 -08005633}
5634
Auke Kok9d5c8242008-01-24 02:22:38 -08005635/**
5636 * igb_receive_skb - helper function to handle rx indications
Alexander Duyck047e0032009-10-27 15:49:27 +00005637 * @q_vector: structure containing interrupt and ring information
5638 * @skb: packet to send up
5639 * @vlan_tag: vlan tag for packet
Auke Kok9d5c8242008-01-24 02:22:38 -08005640 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00005641static void igb_receive_skb(struct igb_q_vector *q_vector,
5642 struct sk_buff *skb,
5643 u16 vlan_tag)
Auke Kok9d5c8242008-01-24 02:22:38 -08005644{
Alexander Duyck047e0032009-10-27 15:49:27 +00005645 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyckd3352522008-07-08 15:12:13 -07005646
Alexander Duyck31b24b92010-03-23 18:35:18 +00005647 if (vlan_tag && adapter->vlgrp)
Alexander Duyck047e0032009-10-27 15:49:27 +00005648 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5649 vlan_tag, skb);
Alexander Duyck182ff8d2009-04-27 22:35:33 +00005650 else
Alexander Duyck047e0032009-10-27 15:49:27 +00005651 napi_gro_receive(&q_vector->napi, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005652}
5653
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005654static inline void igb_rx_checksum_adv(struct igb_ring *ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08005655 u32 status_err, struct sk_buff *skb)
5656{
Eric Dumazetbc8acf22010-09-02 13:07:41 -07005657 skb_checksum_none_assert(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005658
5659 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005660 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5661 (status_err & E1000_RXD_STAT_IXSM))
Auke Kok9d5c8242008-01-24 02:22:38 -08005662 return;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005663
Auke Kok9d5c8242008-01-24 02:22:38 -08005664 /* TCP/UDP checksum error bit is set */
5665 if (status_err &
5666 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
Jesse Brandeburgb9473562009-04-27 22:36:13 +00005667 /*
5668 * work around errata with sctp packets where the TCPE aka
5669 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5670 * packets, (aka let the stack check the crc32c)
5671 */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005672 if ((skb->len == 60) &&
Eric Dumazet12dcd862010-10-15 17:27:10 +00005673 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
5674 u64_stats_update_begin(&ring->rx_syncp);
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005675 ring->rx_stats.csum_err++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005676 u64_stats_update_end(&ring->rx_syncp);
5677 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005678 /* let the stack verify checksum errors */
Auke Kok9d5c8242008-01-24 02:22:38 -08005679 return;
5680 }
5681 /* It must be a TCP or UDP packet with a valid checksum */
5682 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5683 skb->ip_summed = CHECKSUM_UNNECESSARY;
5684
Alexander Duyck59d71982010-04-27 13:09:25 +00005685 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
Auke Kok9d5c8242008-01-24 02:22:38 -08005686}
5687
Nick Nunley757b77e2010-03-26 11:36:47 +00005688static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005689 struct sk_buff *skb)
5690{
5691 struct igb_adapter *adapter = q_vector->adapter;
5692 struct e1000_hw *hw = &adapter->hw;
5693 u64 regval;
5694
5695 /*
5696 * If this bit is set, then the RX registers contain the time stamp. No
5697 * other packet will be time stamped until we read these registers, so
5698 * read the registers to make them available again. Because only one
5699 * packet can be time stamped at a time, we know that the register
5700 * values must belong to this one here and therefore we don't need to
5701 * compare any of the additional attributes stored for it.
5702 *
Oliver Hartkopp2244d072010-08-17 08:59:14 +00005703 * If nothing went wrong, then it should have a shared tx_flags that we
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005704 * can turn into a skb_shared_hwtstamps.
5705 */
Nick Nunley757b77e2010-03-26 11:36:47 +00005706 if (staterr & E1000_RXDADV_STAT_TSIP) {
5707 u32 *stamp = (u32 *)skb->data;
5708 regval = le32_to_cpu(*(stamp + 2));
5709 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5710 skb_pull(skb, IGB_TS_HDR_LEN);
5711 } else {
 5712 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5713 return;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005714
Nick Nunley757b77e2010-03-26 11:36:47 +00005715 regval = rd32(E1000_RXSTMPL);
5716 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5717 }
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00005718
5719 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5720}

Alexander Duyck4c844852009-10-27 15:52:07 +00005721static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005722 union e1000_adv_rx_desc *rx_desc)
5723{
5724 /* HW will not DMA in data larger than the given buffer, even if it
5725 * parses the (NFS, of course) header to be larger. In that case, it
5726 * fills the header buffer and spills the rest into the page.
5727 */
5728 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5729 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
Alexander Duyck4c844852009-10-27 15:52:07 +00005730 if (hlen > rx_ring->rx_buffer_len)
5731 hlen = rx_ring->rx_buffer_len;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005732 return hlen;
5733}
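/*
 * Illustrative decode (assuming the usual definitions of the mask as
 * 0x7FE0 and the shift as 5): a hdr_info value of 0x0800 gives
 * (0x0800 & E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT
 * == 64, i.e. a 64-byte header was split off by the hardware.
 */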
5734
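/*
 * igb_clean_rx_irq_adv handles the packet-split receive path: the header
 * portion of a frame is DMA'd into skb->data while any remainder arrives
 * in a half-page buffer attached below as a page fragment, and frames
 * spanning several descriptors are chained until the EOP bit is seen.
 */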
Alexander Duyck047e0032009-10-27 15:49:27 +00005735static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5736 int *work_done, int budget)
Auke Kok9d5c8242008-01-24 02:22:38 -08005737{
Alexander Duyck047e0032009-10-27 15:49:27 +00005738 struct igb_ring *rx_ring = q_vector->rx_ring;
Alexander Duycke694e962009-10-27 15:53:06 +00005739 struct net_device *netdev = rx_ring->netdev;
Alexander Duyck59d71982010-04-27 13:09:25 +00005740 struct device *dev = rx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08005741 union e1000_adv_rx_desc *rx_desc, *next_rxd;
 5742 struct igb_buffer *buffer_info, *next_buffer;
5743 struct sk_buff *skb;
Auke Kok9d5c8242008-01-24 02:22:38 -08005744 bool cleaned = false;
5745 int cleaned_count = 0;
Alexander Duyckd1eff352009-11-12 18:38:35 +00005746 int current_node = numa_node_id();
Auke Kok9d5c8242008-01-24 02:22:38 -08005747 unsigned int total_bytes = 0, total_packets = 0;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00005748 unsigned int i;
Alexander Duyck2d94d8a2009-07-23 18:10:06 +00005749 u32 staterr;
5750 u16 length;
Alexander Duyck047e0032009-10-27 15:49:27 +00005751 u16 vlan_tag;
Auke Kok9d5c8242008-01-24 02:22:38 -08005752
5753 i = rx_ring->next_to_clean;
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005754 buffer_info = &rx_ring->buffer_info[i];
Auke Kok9d5c8242008-01-24 02:22:38 -08005755 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5756 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5757
5758 while (staterr & E1000_RXD_STAT_DD) {
5759 if (*work_done >= budget)
5760 break;
5761 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00005762 rmb(); /* read descriptor and rx_buffer_info after status DD */
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005763
5764 skb = buffer_info->skb;
5765 prefetch(skb->data - NET_IP_ALIGN);
5766 buffer_info->skb = NULL;
5767
5768 i++;
5769 if (i == rx_ring->count)
5770 i = 0;
Alexander Duyck42d07812009-10-27 23:51:16 +00005771
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005772 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
5773 prefetch(next_rxd);
5774 next_buffer = &rx_ring->buffer_info[i];
5775
5776 length = le16_to_cpu(rx_desc->wb.upper.length);
5777 cleaned = true;
5778 cleaned_count++;
5779
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005780 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00005781 dma_unmap_single(dev, buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00005782 rx_ring->rx_buffer_len,
Alexander Duyck59d71982010-04-27 13:09:25 +00005783 DMA_FROM_DEVICE);
Jesse Brandeburg91615f72009-06-30 12:45:15 +00005784 buffer_info->dma = 0;
Alexander Duyck4c844852009-10-27 15:52:07 +00005785 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005786 skb_put(skb, length);
5787 goto send_up;
5788 }
Alexander Duyck4c844852009-10-27 15:52:07 +00005789 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005790 }
5791
5792 if (length) {
Alexander Duyck59d71982010-04-27 13:09:25 +00005793 dma_unmap_page(dev, buffer_info->page_dma,
5794 PAGE_SIZE / 2, DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08005795 buffer_info->page_dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005796
Koki Sanagiaa913402010-04-27 01:01:19 +00005797 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005798 buffer_info->page,
5799 buffer_info->page_offset,
5800 length);
5801
Alexander Duyckd1eff352009-11-12 18:38:35 +00005802 if ((page_count(buffer_info->page) != 1) ||
5803 (page_to_nid(buffer_info->page) != current_node))
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005804 buffer_info->page = NULL;
5805 else
5806 get_page(buffer_info->page);
Auke Kok9d5c8242008-01-24 02:22:38 -08005807
5808 skb->len += length;
5809 skb->data_len += length;
5810 skb->truesize += length;
Auke Kok9d5c8242008-01-24 02:22:38 -08005811 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005812
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005813 if (!(staterr & E1000_RXD_STAT_EOP)) {
Alexander Duyckb2d56532008-11-20 00:47:34 -08005814 buffer_info->skb = next_buffer->skb;
5815 buffer_info->dma = next_buffer->dma;
5816 next_buffer->skb = skb;
5817 next_buffer->dma = 0;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005818 goto next_desc;
5819 }
Alexander Duyck69d3ca52009-02-06 23:15:04 +00005820send_up:
Auke Kok9d5c8242008-01-24 02:22:38 -08005821 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
5822 dev_kfree_skb_irq(skb);
5823 goto next_desc;
5824 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005825
Nick Nunley757b77e2010-03-26 11:36:47 +00005826 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5827 igb_rx_hwtstamp(q_vector, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005828 total_bytes += skb->len;
5829 total_packets++;
5830
Alexander Duyck85ad76b2009-10-27 15:52:46 +00005831 igb_rx_checksum_adv(rx_ring, staterr, skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08005832
5833 skb->protocol = eth_type_trans(skb, netdev);
Alexander Duyck047e0032009-10-27 15:49:27 +00005834 skb_record_rx_queue(skb, rx_ring->queue_index);
Auke Kok9d5c8242008-01-24 02:22:38 -08005835
Alexander Duyck047e0032009-10-27 15:49:27 +00005836 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5837 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5838
5839 igb_receive_skb(q_vector, skb, vlan_tag);
Auke Kok9d5c8242008-01-24 02:22:38 -08005840
Auke Kok9d5c8242008-01-24 02:22:38 -08005841next_desc:
5842 rx_desc->wb.upper.status_error = 0;
5843
5844 /* return some buffers to hardware, one at a time is too slow */
5845 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
Mitch Williams3b644cf2008-06-27 10:59:48 -07005846 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005847 cleaned_count = 0;
5848 }
5849
5850 /* use prefetched values */
5851 rx_desc = next_rxd;
5852 buffer_info = next_buffer;
Auke Kok9d5c8242008-01-24 02:22:38 -08005853 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5854 }
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005855
Auke Kok9d5c8242008-01-24 02:22:38 -08005856 rx_ring->next_to_clean = i;
Alexander Duyckc493ea42009-03-20 00:16:50 +00005857 cleaned_count = igb_desc_unused(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08005858
5859 if (cleaned_count)
Mitch Williams3b644cf2008-06-27 10:59:48 -07005860 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
Auke Kok9d5c8242008-01-24 02:22:38 -08005861
5862 rx_ring->total_packets += total_packets;
5863 rx_ring->total_bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005864 u64_stats_update_begin(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08005865 rx_ring->rx_stats.packets += total_packets;
5866 rx_ring->rx_stats.bytes += total_bytes;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005867 u64_stats_update_end(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08005868 return cleaned;
5869}
5870
Auke Kok9d5c8242008-01-24 02:22:38 -08005871/**
5872 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 5873 * @rx_ring: pointer to the receive ring to refill
 * @cleaned_count: number of buffers to replace
5874 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00005875void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
Auke Kok9d5c8242008-01-24 02:22:38 -08005876{
Alexander Duycke694e962009-10-27 15:53:06 +00005877 struct net_device *netdev = rx_ring->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08005878 union e1000_adv_rx_desc *rx_desc;
5879 struct igb_buffer *buffer_info;
5880 struct sk_buff *skb;
5881 unsigned int i;
Alexander Duyckdb761762009-02-06 23:15:25 +00005882 int bufsz;
Auke Kok9d5c8242008-01-24 02:22:38 -08005883
5884 i = rx_ring->next_to_use;
5885 buffer_info = &rx_ring->buffer_info[i];
5886
Alexander Duyck4c844852009-10-27 15:52:07 +00005887 bufsz = rx_ring->rx_buffer_len;
Alexander Duyckdb761762009-02-06 23:15:25 +00005888
Auke Kok9d5c8242008-01-24 02:22:38 -08005889 while (cleaned_count--) {
5890 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5891
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005892 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005893 if (!buffer_info->page) {
Alexander Duyck42d07812009-10-27 23:51:16 +00005894 buffer_info->page = netdev_alloc_page(netdev);
Eric Dumazet12dcd862010-10-15 17:27:10 +00005895 if (unlikely(!buffer_info->page)) {
5896 u64_stats_update_begin(&rx_ring->rx_syncp);
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005897 rx_ring->rx_stats.alloc_failed++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005898 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005899 goto no_buffers;
5900 }
5901 buffer_info->page_offset = 0;
5902 } else {
5903 buffer_info->page_offset ^= PAGE_SIZE / 2;
Auke Kok9d5c8242008-01-24 02:22:38 -08005904 }
5905 buffer_info->page_dma =
Alexander Duyck59d71982010-04-27 13:09:25 +00005906 dma_map_page(rx_ring->dev, buffer_info->page,
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07005907 buffer_info->page_offset,
5908 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00005909 DMA_FROM_DEVICE);
5910 if (dma_mapping_error(rx_ring->dev,
5911 buffer_info->page_dma)) {
Alexander Duyck42d07812009-10-27 23:51:16 +00005912 buffer_info->page_dma = 0;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005913 u64_stats_update_begin(&rx_ring->rx_syncp);
Alexander Duyck42d07812009-10-27 23:51:16 +00005914 rx_ring->rx_stats.alloc_failed++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005915 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck42d07812009-10-27 23:51:16 +00005916 goto no_buffers;
5917 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005918 }
5919
Alexander Duyck42d07812009-10-27 23:51:16 +00005920 skb = buffer_info->skb;
5921 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00005922 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Eric Dumazet12dcd862010-10-15 17:27:10 +00005923 if (unlikely(!skb)) {
5924 u64_stats_update_begin(&rx_ring->rx_syncp);
Alexander Duyck04a5fca2009-10-27 15:52:27 +00005925 rx_ring->rx_stats.alloc_failed++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005926 u64_stats_update_end(&rx_ring->rx_syncp);
Auke Kok9d5c8242008-01-24 02:22:38 -08005927 goto no_buffers;
5928 }
5929
Auke Kok9d5c8242008-01-24 02:22:38 -08005930 buffer_info->skb = skb;
Alexander Duyck42d07812009-10-27 23:51:16 +00005931 }
5932 if (!buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00005933 buffer_info->dma = dma_map_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00005934 skb->data,
Auke Kok9d5c8242008-01-24 02:22:38 -08005935 bufsz,
Alexander Duyck59d71982010-04-27 13:09:25 +00005936 DMA_FROM_DEVICE);
5937 if (dma_mapping_error(rx_ring->dev,
5938 buffer_info->dma)) {
Alexander Duyck42d07812009-10-27 23:51:16 +00005939 buffer_info->dma = 0;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005940 u64_stats_update_begin(&rx_ring->rx_syncp);
Alexander Duyck42d07812009-10-27 23:51:16 +00005941 rx_ring->rx_stats.alloc_failed++;
Eric Dumazet12dcd862010-10-15 17:27:10 +00005942 u64_stats_update_end(&rx_ring->rx_syncp);
Alexander Duyck42d07812009-10-27 23:51:16 +00005943 goto no_buffers;
5944 }
Auke Kok9d5c8242008-01-24 02:22:38 -08005945 }
5946 /* Refresh the desc even if buffer_addrs didn't change because
5947 * each write-back erases this info. */
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00005948 if (bufsz < IGB_RXBUFFER_1024) {
Auke Kok9d5c8242008-01-24 02:22:38 -08005949 rx_desc->read.pkt_addr =
5950 cpu_to_le64(buffer_info->page_dma);
5951 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
5952 } else {
Alexander Duyck42d07812009-10-27 23:51:16 +00005953 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08005954 rx_desc->read.hdr_addr = 0;
5955 }
5956
5957 i++;
5958 if (i == rx_ring->count)
5959 i = 0;
5960 buffer_info = &rx_ring->buffer_info[i];
5961 }
5962
5963no_buffers:
5964 if (rx_ring->next_to_use != i) {
5965 rx_ring->next_to_use = i;
5966 if (i == 0)
5967 i = (rx_ring->count - 1);
5968 else
5969 i--;
5970
5971 /* Force memory writes to complete before letting h/w
5972 * know there are new descriptors to fetch. (Only
5973 * applicable for weak-ordered memory model archs,
5974 * such as IA-64). */
5975 wmb();
Alexander Duyckfce99e32009-10-27 15:51:27 +00005976 writel(i, rx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08005977 }
5978}
5979
5980/**
 5981 * igb_mii_ioctl - handle MII ioctl requests
 5982 * @netdev: network interface device structure
 5983 * @ifr: pointer to the interface request structure
 5984 * @cmd: ioctl command to execute
5985 **/
5986static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5987{
5988 struct igb_adapter *adapter = netdev_priv(netdev);
5989 struct mii_ioctl_data *data = if_mii(ifr);
5990
5991 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5992 return -EOPNOTSUPP;
5993
5994 switch (cmd) {
5995 case SIOCGMIIPHY:
5996 data->phy_id = adapter->hw.phy.addr;
5997 break;
5998 case SIOCGMIIREG:
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08005999 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6000 &data->val_out))
Auke Kok9d5c8242008-01-24 02:22:38 -08006001 return -EIO;
6002 break;
6003 case SIOCSMIIREG:
6004 default:
6005 return -EOPNOTSUPP;
6006 }
6007 return 0;
6008}
6009
6010/**
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006011 * igb_hwtstamp_ioctl - control hardware time stamping
 6012 * @netdev: network interface device structure
 6013 * @ifr: pointer to the interface request structure
 6014 * @cmd: ioctl command to execute
6015 *
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006016 * Outgoing time stamping can be enabled and disabled. Play nice and
 6017 * disable it when requested, although it shouldn't cause any overhead
6018 * when no packet needs it. At most one packet in the queue may be
6019 * marked for time stamping, otherwise it would be impossible to tell
6020 * for sure to which packet the hardware time stamp belongs.
6021 *
6022 * Incoming time stamping has to be configured via the hardware
6023 * filters. Not all combinations are supported, in particular event
6024 * type has to be specified. Matching the kind of event packet is
6025 * not supported, with the exception of "all V2 events regardless of
6026 * level 2 or 4".
6027 *
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006028 **/
6029static int igb_hwtstamp_ioctl(struct net_device *netdev,
6030 struct ifreq *ifr, int cmd)
6031{
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006032 struct igb_adapter *adapter = netdev_priv(netdev);
6033 struct e1000_hw *hw = &adapter->hw;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006034 struct hwtstamp_config config;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006035 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6036 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006037 u32 tsync_rx_cfg = 0;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006038 bool is_l4 = false;
6039 bool is_l2 = false;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006040 u32 regval;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006041
6042 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6043 return -EFAULT;
6044
6045 /* reserved for future extensions */
6046 if (config.flags)
6047 return -EINVAL;
6048
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006049 switch (config.tx_type) {
6050 case HWTSTAMP_TX_OFF:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006051 tsync_tx_ctl = 0;
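		/* fall through */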
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006052 case HWTSTAMP_TX_ON:
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006053 break;
6054 default:
6055 return -ERANGE;
6056 }
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006057
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006058 switch (config.rx_filter) {
6059 case HWTSTAMP_FILTER_NONE:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006060 tsync_rx_ctl = 0;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006061 break;
6062 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6063 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6064 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6065 case HWTSTAMP_FILTER_ALL:
6066 /*
6067 * register TSYNCRXCFG must be set, therefore it is not
6068 * possible to time stamp both Sync and Delay_Req messages
6069 * => fall back to time stamping all packets
6070 */
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006071 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006072 config.rx_filter = HWTSTAMP_FILTER_ALL;
6073 break;
6074 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006075 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006076 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006077 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006078 break;
6079 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006080 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006081 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006082 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006083 break;
6084 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6085 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006086 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006087 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006088 is_l2 = true;
6089 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006090 config.rx_filter = HWTSTAMP_FILTER_SOME;
6091 break;
6092 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6093 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006094 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006095 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006096 is_l2 = true;
6097 is_l4 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006098 config.rx_filter = HWTSTAMP_FILTER_SOME;
6099 break;
6100 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6101 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6102 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006103 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006104 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006105 is_l2 = true;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006106 break;
6107 default:
6108 return -ERANGE;
6109 }
6110
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006111 if (hw->mac.type == e1000_82575) {
6112 if (tsync_rx_ctl | tsync_tx_ctl)
6113 return -EINVAL;
6114 return 0;
6115 }
6116
Nick Nunley757b77e2010-03-26 11:36:47 +00006117 /*
6118 * Per-packet timestamping only works if all packets are
6119 * timestamped, so enable timestamping in all packets as
6120 * long as one rx filter was configured.
6121 */
6122 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
6123 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6124 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6125 }
6126
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006127 /* enable/disable TX */
6128 regval = rd32(E1000_TSYNCTXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006129 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6130 regval |= tsync_tx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006131 wr32(E1000_TSYNCTXCTL, regval);
6132
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006133 /* enable/disable RX */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006134 regval = rd32(E1000_TSYNCRXCTL);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006135 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6136 regval |= tsync_rx_ctl;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006137 wr32(E1000_TSYNCRXCTL, regval);
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006138
6139 /* define which PTP packets are time stamped */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006140 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6141
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006142 /* define ethertype filter for timestamped packets */
6143 if (is_l2)
6144 wr32(E1000_ETQF(3),
6145 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6146 E1000_ETQF_1588 | /* enable timestamping */
6147 ETH_P_1588)); /* 1588 eth protocol type */
6148 else
6149 wr32(E1000_ETQF(3), 0);
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006150
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006151#define PTP_PORT 319
6152 /* L4 Queue Filter[3]: filter by destination port and protocol */
6153 if (is_l4) {
6154 u32 ftqf = (IPPROTO_UDP /* UDP */
6155 | E1000_FTQF_VF_BP /* VF not compared */
6156 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6157 | E1000_FTQF_MASK); /* mask all inputs */
6158 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006159
Alexander Duyckc5b9bd52009-10-27 23:46:01 +00006160 wr32(E1000_IMIR(3), htons(PTP_PORT));
6161 wr32(E1000_IMIREXT(3),
6162 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6163 if (hw->mac.type == e1000_82576) {
6164 /* enable source port check */
6165 wr32(E1000_SPQF(3), htons(PTP_PORT));
6166 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6167 }
6168 wr32(E1000_FTQF(3), ftqf);
6169 } else {
6170 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6171 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00006172 wrfl();
6173
6174 adapter->hwtstamp_config = config;
6175
6176 /* clear TX/RX time stamp registers, just to be sure */
6177 regval = rd32(E1000_TXSTMPH);
6178 regval = rd32(E1000_RXSTMPH);
6179
6180 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6181 -EFAULT : 0;
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006182}
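/*
 * Illustrative userspace usage (a sketch, not part of the driver): a
 * process would enable hardware timestamping through the standard
 * SIOCSHWTSTAMP interface roughly as follows, given any open socket fd.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */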
6183
6184/**
Auke Kok9d5c8242008-01-24 02:22:38 -08006185 * igb_ioctl - dispatch supported ioctl requests
 6186 * @netdev: network interface device structure
 6187 * @ifr: pointer to the interface request structure
 6188 * @cmd: ioctl command to execute
6189 **/
6190static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6191{
6192 switch (cmd) {
6193 case SIOCGMIIPHY:
6194 case SIOCGMIIREG:
6195 case SIOCSMIIREG:
6196 return igb_mii_ioctl(netdev, ifr, cmd);
Patrick Ohlyc6cb0902009-02-12 05:03:42 +00006197 case SIOCSHWTSTAMP:
6198 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
Auke Kok9d5c8242008-01-24 02:22:38 -08006199 default:
6200 return -EOPNOTSUPP;
6201 }
6202}
6203
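/*
 * The two helpers below give shared e1000 code access to registers in the
 * PCIe capability structure: they locate the capability in config space
 * with pci_find_capability() and then read or write the 16-bit register
 * at the requested offset.
 */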
Alexander Duyck009bc062009-07-23 18:08:35 +00006204s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6205{
6206 struct igb_adapter *adapter = hw->back;
6207 u16 cap_offset;
6208
6209 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
6210 if (!cap_offset)
6211 return -E1000_ERR_CONFIG;
6212
6213 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6214
6215 return 0;
6216}
6217
6218s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6219{
6220 struct igb_adapter *adapter = hw->back;
6221 u16 cap_offset;
6222
6223 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
6224 if (!cap_offset)
6225 return -E1000_ERR_CONFIG;
6226
6227 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6228
6229 return 0;
6230}
6231
Auke Kok9d5c8242008-01-24 02:22:38 -08006232static void igb_vlan_rx_register(struct net_device *netdev,
6233 struct vlan_group *grp)
6234{
6235 struct igb_adapter *adapter = netdev_priv(netdev);
6236 struct e1000_hw *hw = &adapter->hw;
6237 u32 ctrl, rctl;
6238
6239 igb_irq_disable(adapter);
6240 adapter->vlgrp = grp;
6241
6242 if (grp) {
6243 /* enable VLAN tag insert/strip */
6244 ctrl = rd32(E1000_CTRL);
6245 ctrl |= E1000_CTRL_VME;
6246 wr32(E1000_CTRL, ctrl);
6247
Alexander Duyck51466232009-10-27 23:47:35 +00006248 /* Disable CFI check */
Auke Kok9d5c8242008-01-24 02:22:38 -08006249 rctl = rd32(E1000_RCTL);
Auke Kok9d5c8242008-01-24 02:22:38 -08006250 rctl &= ~E1000_RCTL_CFIEN;
6251 wr32(E1000_RCTL, rctl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006252 } else {
6253 /* disable VLAN tag insert/strip */
6254 ctrl = rd32(E1000_CTRL);
6255 ctrl &= ~E1000_CTRL_VME;
6256 wr32(E1000_CTRL, ctrl);
Auke Kok9d5c8242008-01-24 02:22:38 -08006257 }
6258
Alexander Duycke1739522009-02-19 20:39:44 -08006259 igb_rlpml_set(adapter);
6260
Auke Kok9d5c8242008-01-24 02:22:38 -08006261 if (!test_bit(__IGB_DOWN, &adapter->state))
6262 igb_irq_enable(adapter);
6263}
6264
6265static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6266{
6267 struct igb_adapter *adapter = netdev_priv(netdev);
6268 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006269 int pf_id = adapter->vfs_allocated_count;
Auke Kok9d5c8242008-01-24 02:22:38 -08006270
Alexander Duyck51466232009-10-27 23:47:35 +00006271 /* attempt to add filter to vlvf array */
6272 igb_vlvf_set(adapter, vid, true, pf_id);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006273
Alexander Duyck51466232009-10-27 23:47:35 +00006274 /* add the filter since PF can receive vlans w/o entry in vlvf */
6275 igb_vfta_set(hw, vid, true);
Auke Kok9d5c8242008-01-24 02:22:38 -08006276}
6277
6278static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
6279{
6280 struct igb_adapter *adapter = netdev_priv(netdev);
6281 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006282 int pf_id = adapter->vfs_allocated_count;
Alexander Duyck51466232009-10-27 23:47:35 +00006283 s32 err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006284
6285 igb_irq_disable(adapter);
6286 vlan_group_set_device(adapter->vlgrp, vid, NULL);
6287
6288 if (!test_bit(__IGB_DOWN, &adapter->state))
6289 igb_irq_enable(adapter);
6290
Alexander Duyck51466232009-10-27 23:47:35 +00006291 /* remove vlan from VLVF table array */
6292 err = igb_vlvf_set(adapter, vid, false, pf_id);
Auke Kok9d5c8242008-01-24 02:22:38 -08006293
Alexander Duyck51466232009-10-27 23:47:35 +00006294 /* if vid was not present in VLVF just remove it from table */
6295 if (err)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006296 igb_vfta_set(hw, vid, false);
Auke Kok9d5c8242008-01-24 02:22:38 -08006297}
6298
6299static void igb_restore_vlan(struct igb_adapter *adapter)
6300{
6301 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
6302
6303 if (adapter->vlgrp) {
6304 u16 vid;
Jesse Grossb7381272010-10-20 13:56:02 +00006305 for (vid = 0; vid < VLAN_N_VID; vid++) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006306 if (!vlan_group_get_device(adapter->vlgrp, vid))
6307 continue;
6308 igb_vlan_rx_add_vid(adapter->netdev, vid);
6309 }
6310 }
6311}
6312
6313int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
6314{
Alexander Duyck090b1792009-10-27 23:51:55 +00006315 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08006316 struct e1000_mac_info *mac = &adapter->hw.mac;
6317
6318 mac->autoneg = 0;
6319
Carolyn Wybornycd2638a2010-10-12 22:27:02 +00006320 /* Fiber NICs only allow 1000 Mbps full duplex */
6321 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
6322 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
6323 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6324 return -EINVAL;
6325 }
6326
Auke Kok9d5c8242008-01-24 02:22:38 -08006327 switch (spddplx) {
6328 case SPEED_10 + DUPLEX_HALF:
6329 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6330 break;
6331 case SPEED_10 + DUPLEX_FULL:
6332 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6333 break;
6334 case SPEED_100 + DUPLEX_HALF:
6335 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6336 break;
6337 case SPEED_100 + DUPLEX_FULL:
6338 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6339 break;
6340 case SPEED_1000 + DUPLEX_FULL:
6341 mac->autoneg = 1;
6342 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6343 break;
6344 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6345 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00006346 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08006347 return -EINVAL;
6348 }
6349 return 0;
6350}
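/*
 * Example (illustrative): ethtool hands the driver speed and duplex as a
 * single sum, so a forced 100 Mbps full-duplex request arrives here as
 * SPEED_100 + DUPLEX_FULL == 100 + 1 == 101 and is mapped by the switch
 * above to ADVERTISE_100_FULL.
 */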
6351
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006352static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
Auke Kok9d5c8242008-01-24 02:22:38 -08006353{
6354 struct net_device *netdev = pci_get_drvdata(pdev);
6355 struct igb_adapter *adapter = netdev_priv(netdev);
6356 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck2d064c02008-07-08 15:10:12 -07006357 u32 ctrl, rctl, status;
Auke Kok9d5c8242008-01-24 02:22:38 -08006358 u32 wufc = adapter->wol;
6359#ifdef CONFIG_PM
6360 int retval = 0;
6361#endif
6362
6363 netif_device_detach(netdev);
6364
Alexander Duycka88f10e2008-07-08 15:13:38 -07006365 if (netif_running(netdev))
6366 igb_close(netdev);
6367
Alexander Duyck047e0032009-10-27 15:49:27 +00006368 igb_clear_interrupt_scheme(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006369
6370#ifdef CONFIG_PM
6371 retval = pci_save_state(pdev);
6372 if (retval)
6373 return retval;
6374#endif
6375
6376 status = rd32(E1000_STATUS);
6377 if (status & E1000_STATUS_LU)
6378 wufc &= ~E1000_WUFC_LNKC;
6379
6380 if (wufc) {
6381 igb_setup_rctl(adapter);
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006382 igb_set_rx_mode(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006383
6384 /* turn on all-multi mode if wake on multicast is enabled */
6385 if (wufc & E1000_WUFC_MC) {
6386 rctl = rd32(E1000_RCTL);
6387 rctl |= E1000_RCTL_MPE;
6388 wr32(E1000_RCTL, rctl);
6389 }
6390
6391 ctrl = rd32(E1000_CTRL);
6392 /* advertise wake from D3Cold */
6393 #define E1000_CTRL_ADVD3WUC 0x00100000
6394 /* phy power management enable */
6395 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6396 ctrl |= E1000_CTRL_ADVD3WUC;
6397 wr32(E1000_CTRL, ctrl);
6398
Auke Kok9d5c8242008-01-24 02:22:38 -08006399 /* Allow time for pending master requests to run */
Alexander Duyck330a6d62009-10-27 23:51:35 +00006400 igb_disable_pcie_master(hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08006401
6402 wr32(E1000_WUC, E1000_WUC_PME_EN);
6403 wr32(E1000_WUFC, wufc);
Auke Kok9d5c8242008-01-24 02:22:38 -08006404 } else {
6405 wr32(E1000_WUC, 0);
6406 wr32(E1000_WUFC, 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08006407 }
6408
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006409 *enable_wake = wufc || adapter->en_mng_pt;
6410 if (!*enable_wake)
Nick Nunley88a268c2010-02-17 01:01:59 +00006411 igb_power_down_link(adapter);
6412 else
6413 igb_power_up_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006414
6415 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6416 * would have already happened in close and is redundant. */
6417 igb_release_hw_control(adapter);
6418
6419 pci_disable_device(pdev);
6420
Auke Kok9d5c8242008-01-24 02:22:38 -08006421 return 0;
6422}
6423
6424#ifdef CONFIG_PM
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006425static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
6426{
6427 int retval;
6428 bool wake;
6429
6430 retval = __igb_shutdown(pdev, &wake);
6431 if (retval)
6432 return retval;
6433
6434 if (wake) {
6435 pci_prepare_to_sleep(pdev);
6436 } else {
6437 pci_wake_from_d3(pdev, false);
6438 pci_set_power_state(pdev, PCI_D3hot);
6439 }
6440
6441 return 0;
6442}
6443
Auke Kok9d5c8242008-01-24 02:22:38 -08006444static int igb_resume(struct pci_dev *pdev)
6445{
6446 struct net_device *netdev = pci_get_drvdata(pdev);
6447 struct igb_adapter *adapter = netdev_priv(netdev);
6448 struct e1000_hw *hw = &adapter->hw;
6449 u32 err;
6450
6451 pci_set_power_state(pdev, PCI_D0);
6452 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006453 pci_save_state(pdev);
Taku Izumi42bfd332008-06-20 12:10:30 +09006454
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006455 err = pci_enable_device_mem(pdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08006456 if (err) {
6457 dev_err(&pdev->dev,
6458 "igb: Cannot enable PCI device from suspend\n");
6459 return err;
6460 }
6461 pci_set_master(pdev);
6462
6463 pci_enable_wake(pdev, PCI_D3hot, 0);
6464 pci_enable_wake(pdev, PCI_D3cold, 0);
6465
Alexander Duyck047e0032009-10-27 15:49:27 +00006466 if (igb_init_interrupt_scheme(adapter)) {
Alexander Duycka88f10e2008-07-08 15:13:38 -07006467 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6468 return -ENOMEM;
Auke Kok9d5c8242008-01-24 02:22:38 -08006469 }
6470
Auke Kok9d5c8242008-01-24 02:22:38 -08006471 igb_reset(adapter);
Alexander Duycka8564f02009-02-06 23:21:10 +00006472
6473 /* let the f/w know that the h/w is now under the control of the
6474 * driver. */
6475 igb_get_hw_control(adapter);
6476
Auke Kok9d5c8242008-01-24 02:22:38 -08006477 wr32(E1000_WUS, ~0);
6478
Alexander Duycka88f10e2008-07-08 15:13:38 -07006479 if (netif_running(netdev)) {
6480 err = igb_open(netdev);
6481 if (err)
6482 return err;
6483 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006484
6485 netif_device_attach(netdev);
6486
Auke Kok9d5c8242008-01-24 02:22:38 -08006487 return 0;
6488}
6489#endif
6490
6491static void igb_shutdown(struct pci_dev *pdev)
6492{
Rafael J. Wysocki3fe7c4c2009-03-31 21:23:50 +00006493 bool wake;
6494
6495 __igb_shutdown(pdev, &wake);
6496
6497 if (system_state == SYSTEM_POWER_OFF) {
6498 pci_wake_from_d3(pdev, wake);
6499 pci_set_power_state(pdev, PCI_D3hot);
6500 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006501}
6502
6503#ifdef CONFIG_NET_POLL_CONTROLLER
6504/*
6505 * Polling 'interrupt' - used by things like netconsole to send skbs
6506 * without having to re-enable interrupts. It's not called while
6507 * the interrupt routine is executing.
6508 */
6509static void igb_netpoll(struct net_device *netdev)
6510{
6511 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006512 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08006513 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08006514
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006515 if (!adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00006516 struct igb_q_vector *q_vector = adapter->q_vector[0];
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006517 igb_irq_disable(adapter);
Alexander Duyck047e0032009-10-27 15:49:27 +00006518 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006519 return;
6520 }
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07006521
Alexander Duyck047e0032009-10-27 15:49:27 +00006522 for (i = 0; i < adapter->num_q_vectors; i++) {
6523 struct igb_q_vector *q_vector = adapter->q_vector[i];
6524 wr32(E1000_EIMC, q_vector->eims_value);
6525 napi_schedule(&q_vector->napi);
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00006526 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006527}
6528#endif /* CONFIG_NET_POLL_CONTROLLER */
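/*
 * Sketch of how igb_netpoll is reached (assumes the net_device_ops
 * table declared earlier in this file, outside this excerpt):
 *
 *	.ndo_poll_controller	= igb_netpoll,
 *
 * Netconsole and friends call this with interrupts disabled, which is
 * why each queue vector is masked and its NAPI context scheduled by
 * hand above.
 */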
6529
6530/**
6531 * igb_io_error_detected - called when PCI error is detected
6532 * @pdev: Pointer to PCI device
6533 * @state: The current pci connection state
6534 *
6535 * This function is called after a PCI bus error affecting
6536 * this device has been detected.
6537 */
6538static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6539 pci_channel_state_t state)
6540{
6541 struct net_device *netdev = pci_get_drvdata(pdev);
6542 struct igb_adapter *adapter = netdev_priv(netdev);
6543
6544 netif_device_detach(netdev);
6545
Alexander Duyck59ed6ee2009-06-30 12:46:34 +00006546 if (state == pci_channel_io_perm_failure)
6547 return PCI_ERS_RESULT_DISCONNECT;
6548
Auke Kok9d5c8242008-01-24 02:22:38 -08006549 if (netif_running(netdev))
6550 igb_down(adapter);
6551 pci_disable_device(pdev);
6552
6553	/* Request a slot reset. */
6554 return PCI_ERS_RESULT_NEED_RESET;
6555}
6556
6557/**
6558 * igb_io_slot_reset - called after the pci bus has been reset.
6559 * @pdev: Pointer to PCI device
6560 *
6561 * Restart the card from scratch, as if from a cold boot. Implementation
6562 * resembles the first half of the igb_resume routine.
6563 */
6564static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6565{
6566 struct net_device *netdev = pci_get_drvdata(pdev);
6567 struct igb_adapter *adapter = netdev_priv(netdev);
6568 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck40a914f2008-11-27 00:24:37 -08006569 pci_ers_result_t result;
Taku Izumi42bfd332008-06-20 12:10:30 +09006570 int err;
Auke Kok9d5c8242008-01-24 02:22:38 -08006571
Alexander Duyckaed5dec2009-02-06 23:16:04 +00006572 if (pci_enable_device_mem(pdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08006573 dev_err(&pdev->dev,
6574 "Cannot re-enable PCI device after reset.\n");
Alexander Duyck40a914f2008-11-27 00:24:37 -08006575 result = PCI_ERS_RESULT_DISCONNECT;
6576 } else {
6577 pci_set_master(pdev);
6578 pci_restore_state(pdev);
Nick Nunleyb94f2d72010-02-17 01:02:19 +00006579 pci_save_state(pdev);
Alexander Duyck40a914f2008-11-27 00:24:37 -08006580
6581 pci_enable_wake(pdev, PCI_D3hot, 0);
6582 pci_enable_wake(pdev, PCI_D3cold, 0);
6583
6584 igb_reset(adapter);
6585 wr32(E1000_WUS, ~0);
6586 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9d5c8242008-01-24 02:22:38 -08006587 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006588
Jeff Kirsherea943d42008-12-11 20:34:19 -08006589 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6590 if (err) {
6591 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6592 "failed 0x%0x\n", err);
6593 /* non-fatal, continue */
6594 }
Auke Kok9d5c8242008-01-24 02:22:38 -08006595
Alexander Duyck40a914f2008-11-27 00:24:37 -08006596 return result;
Auke Kok9d5c8242008-01-24 02:22:38 -08006597}
6598
6599/**
6600 * igb_io_resume - called when traffic can start flowing again.
6601 * @pdev: Pointer to PCI device
6602 *
6603 * This callback is called when the error recovery driver tells us that
6604 * it's OK to resume normal operation. Implementation resembles the
6605 * second half of the igb_resume routine.
6606 */
6607static void igb_io_resume(struct pci_dev *pdev)
6608{
6609 struct net_device *netdev = pci_get_drvdata(pdev);
6610 struct igb_adapter *adapter = netdev_priv(netdev);
6611
Auke Kok9d5c8242008-01-24 02:22:38 -08006612 if (netif_running(netdev)) {
6613 if (igb_up(adapter)) {
6614 dev_err(&pdev->dev, "igb_up failed after reset\n");
6615 return;
6616 }
6617 }
6618
6619 netif_device_attach(netdev);
6620
6621 /* let the f/w know that the h/w is now under the control of the
6622 * driver. */
6623 igb_get_hw_control(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08006624}
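/*
 * The three igb_io_* callbacks above plug into the PCI error recovery
 * core through a struct pci_error_handlers. A minimal sketch (the
 * actual table is declared with the driver registration code earlier
 * in this file, outside this excerpt):
 *
 * static struct pci_error_handlers igb_err_handler = {
 *	.error_detected = igb_io_error_detected,
 *	.slot_reset = igb_io_slot_reset,
 *	.resume = igb_io_resume,
 * };
 */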
6625
Alexander Duyck26ad9172009-10-05 06:32:49 +00006626static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6627 u8 qsel)
6628{
6629 u32 rar_low, rar_high;
6630 struct e1000_hw *hw = &adapter->hw;
6631
6632 /* HW expects these in little endian so we reverse the byte order
6633 * from network order (big endian) to little endian
6634 */
6635 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6636 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6637 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6638
6639 /* Indicate to hardware the Address is Valid. */
6640 rar_high |= E1000_RAH_AV;
6641
6642 if (hw->mac.type == e1000_82575)
6643 rar_high |= E1000_RAH_POOL_1 * qsel;
6644 else
6645 rar_high |= E1000_RAH_POOL_1 << qsel;
6646
6647 wr32(E1000_RAL(index), rar_low);
6648 wrfl();
6649 wr32(E1000_RAH(index), rar_high);
6650 wrfl();
6651}
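/*
 * Worked example (illustrative only): for addr = 00:1b:21:aa:bb:cc the
 * byte swap above yields
 *	rar_low  = 0xaa211b00
 *	rar_high = 0x0000ccbb | E1000_RAH_AV | <pool bits>
 */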
6652
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006653static int igb_set_vf_mac(struct igb_adapter *adapter,
6654 int vf, unsigned char *mac_addr)
6655{
6656 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00006657	/* VF MAC addresses start at the end of the receive addresses and move
6658	 * towards the first, so a collision should not be possible */
6659 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006660
Alexander Duyck37680112009-02-19 20:40:30 -08006661 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006662
Alexander Duyck26ad9172009-10-05 06:32:49 +00006663 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006664
6665 return 0;
6666}
6667
Williams, Mitch A8151d292010-02-10 01:44:24 +00006668static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6669{
6670 struct igb_adapter *adapter = netdev_priv(netdev);
6671 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6672 return -EINVAL;
6673 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6674 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6675 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6676 " change effective.");
6677 if (test_bit(__IGB_DOWN, &adapter->state)) {
6678 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6679 " but the PF device is not up.\n");
6680 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6681 " attempting to use the VF device.\n");
6682 }
6683 return igb_set_vf_mac(adapter, vf, mac);
6684}
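/*
 * This ndo is reached via rtnetlink; with an iproute2 build that
 * supports the VF options (an assumption about the userspace tools):
 *
 *	ip link set eth0 vf 0 mac 00:11:22:33:44:55
 */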
6685
Lior Levy17dc5662011-02-08 02:28:46 +00006686static int igb_link_mbps(int internal_link_speed)
6687{
6688 switch (internal_link_speed) {
6689 case SPEED_100:
6690 return 100;
6691 case SPEED_1000:
6692 return 1000;
6693 default:
6694 return 0;
6695 }
6696}
6697
6698static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6699 int link_speed)
6700{
6701 int rf_dec, rf_int;
6702 u32 bcnrc_val;
6703
6704 if (tx_rate != 0) {
6705 /* Calculate the rate factor values to set */
6706 rf_int = link_speed / tx_rate;
6707 rf_dec = (link_speed - (rf_int * tx_rate));
6708 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
6709
6710 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6711 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6712 E1000_RTTBCNRC_RF_INT_MASK);
6713 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6714 } else {
6715 bcnrc_val = 0;
6716 }
6717
6718 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6719 wr32(E1000_RTTBCNRC, bcnrc_val);
6720}
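/*
 * Worked example (illustrative; assumes E1000_RTTBCNRC_RF_INT_SHIFT is
 * 14, per the register definitions): link_speed = 1000, tx_rate = 300:
 *	rf_int = 1000 / 300 = 3
 *	rf_dec = ((1000 - 3 * 300) << 14) / 300 = 5461
 * so the programmed rate factor is 3 + 5461/16384 ~= 3.333, i.e.
 * link_speed / tx_rate, which the hardware uses to pace the VF queue.
 */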
6721
6722static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6723{
6724 int actual_link_speed, i;
6725 bool reset_rate = false;
6726
6727 /* VF TX rate limit was not set or not supported */
6728 if ((adapter->vf_rate_link_speed == 0) ||
6729 (adapter->hw.mac.type != e1000_82576))
6730 return;
6731
6732 actual_link_speed = igb_link_mbps(adapter->link_speed);
6733 if (actual_link_speed != adapter->vf_rate_link_speed) {
6734 reset_rate = true;
6735 adapter->vf_rate_link_speed = 0;
6736 dev_info(&adapter->pdev->dev,
6737 "Link speed has been changed. VF Transmit "
6738 "rate is disabled\n");
6739 }
6740
6741 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6742 if (reset_rate)
6743 adapter->vf_data[i].tx_rate = 0;
6744
6745 igb_set_vf_rate_limit(&adapter->hw, i,
6746 adapter->vf_data[i].tx_rate,
6747 actual_link_speed);
6748 }
6749}
6750
Williams, Mitch A8151d292010-02-10 01:44:24 +00006751static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6752{
Lior Levy17dc5662011-02-08 02:28:46 +00006753 struct igb_adapter *adapter = netdev_priv(netdev);
6754 struct e1000_hw *hw = &adapter->hw;
6755 int actual_link_speed;
6756
6757 if (hw->mac.type != e1000_82576)
6758 return -EOPNOTSUPP;
6759
6760 actual_link_speed = igb_link_mbps(adapter->link_speed);
6761 if ((vf >= adapter->vfs_allocated_count) ||
6762 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6763 (tx_rate < 0) || (tx_rate > actual_link_speed))
6764 return -EINVAL;
6765
6766 adapter->vf_rate_link_speed = actual_link_speed;
6767 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6768 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6769
6770 return 0;
Williams, Mitch A8151d292010-02-10 01:44:24 +00006771}
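/*
 * Likewise reachable via rtnetlink; e.g. to cap VF 0 at 300 Mbps
 * (again assuming iproute2 VF support):
 *
 *	ip link set eth0 vf 0 rate 300
 */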
6772
6773static int igb_ndo_get_vf_config(struct net_device *netdev,
6774 int vf, struct ifla_vf_info *ivi)
6775{
6776 struct igb_adapter *adapter = netdev_priv(netdev);
6777 if (vf >= adapter->vfs_allocated_count)
6778 return -EINVAL;
6779 ivi->vf = vf;
6780 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
Lior Levy17dc5662011-02-08 02:28:46 +00006781 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
Williams, Mitch A8151d292010-02-10 01:44:24 +00006782 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6783 ivi->qos = adapter->vf_data[vf].pf_qos;
6784 return 0;
6785}
6786
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006787static void igb_vmm_control(struct igb_adapter *adapter)
6788{
6789 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck10d8e902009-10-27 15:54:04 +00006790 u32 reg;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006791
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006792 switch (hw->mac.type) {
6793 case e1000_82575:
6794 default:
6795 /* replication is not supported for 82575 */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006796 return;
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006797 case e1000_82576:
6798 /* notify HW that the MAC is adding vlan tags */
6799 reg = rd32(E1000_DTXCTL);
6800 reg |= E1000_DTXCTL_VLAN_ADDED;
6801 wr32(E1000_DTXCTL, reg);
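		/* fall through */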
6802 case e1000_82580:
6803 /* enable replication vlan tag stripping */
6804 reg = rd32(E1000_RPLOLR);
6805 reg |= E1000_RPLOLR_STRVLAN;
6806 wr32(E1000_RPLOLR, reg);
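		/* fall through */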
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00006807 case e1000_i350:
6808 /* none of the above registers are supported by i350 */
Alexander Duyck52a1dd42010-03-22 14:07:46 +00006809 break;
6810 }
Alexander Duyck10d8e902009-10-27 15:54:04 +00006811
Alexander Duyckd4960302009-10-27 15:53:45 +00006812 if (adapter->vfs_allocated_count) {
6813 igb_vmdq_set_loopback_pf(hw, true);
6814 igb_vmdq_set_replication_pf(hw, true);
Greg Rose13800462010-11-06 02:08:26 +00006815 igb_vmdq_set_anti_spoofing_pf(hw, true,
6816 adapter->vfs_allocated_count);
Alexander Duyckd4960302009-10-27 15:53:45 +00006817 } else {
6818 igb_vmdq_set_loopback_pf(hw, false);
6819 igb_vmdq_set_replication_pf(hw, false);
6820 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08006821}
6822
Auke Kok9d5c8242008-01-24 02:22:38 -08006823/* igb_main.c */