/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.4.13-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
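
/*
 * For the queue registers above, igb_regdump() collapses all four ring
 * instances onto a single line.  A sample line of output (the values
 * shown are illustrative only) looks like:
 *
 *   RDLEN[0-3]      00001000 00001000 00001000 00001000
 */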

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	int n = 0;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_buffer *buffer_info;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						rx_ring->rx_buffer_len, true);
					if (rx_ring->rx_buffer_len
						< IGB_RXBUFFER_1024)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1,
						  phys_to_virt(
						    buffer_info->page_dma +
						    buffer_info->page_offset),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on the lowest register read.  For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However, since
	 * we never adjusted TIMINCA, SYSTIMR always reads as all 0s, so it
	 * can safely be ignored.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
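
/*
 * Reading SYSTIMR first latches SYSTIML/SYSTIMH, so on the 82580 the
 * value assembled above is effectively
 * (SYSTIMH << (shift + 32)) | (SYSTIML << shift) with
 * shift = IGB_82580_TSYNC_SHIFT; on older parts shift stays 0 and the
 * two registers are combined directly.
 */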

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
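/*
 * Q_IDX_82576() interleaves software queue indices onto the hardware
 * layout used for virtualization: Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8,
 * Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, and so on, matching the VF
 * queue pairing described in igb_cache_ring_register() below.
 */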
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}
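
/*
 * Example (hypothetical configuration): on an 82576 with 7 VFs,
 * rbase_offset is 7, so PF rx queue 0 is cached as hardware queue
 * 7 + Q_IDX_82576(0) = 7 and rx queue 1 as 7 + Q_IDX_82576(1) = 15,
 * leaving the (n, n + 8) queue pairs free for VFs 0-6.  Note that the
 * 82576 case above intentionally falls through to the default mapping
 * for any queues not covered by the VF interleaving.
 */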

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* The 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/* The 82580 uses the same table-based approach as the 82576,
		   but has fewer IVAR entries: each entry serves a pair of
		   adjacent queues, so queues carry over into shared
		   registers. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
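
/*
 * IVAR worked example (illustrative values): mapping rx queue 9 to
 * MSI-X vector 2 on an 82576 selects index 9 & 0x7 = 1 and, since
 * 9 >= 8, writes (2 | E1000_IVAR_VALID) into the third byte of that
 * IVAR0 entry.  On an 82580/i350 the same queue selects index
 * 9 >> 1 = 4 and, with bit 0 set, also lands in the third byte.
 */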

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
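
/*
 * With two combined Tx/Rx q_vectors on a device named "eth0", for
 * example, the requests above produce IRQs named "eth0" (the
 * msix_other/link vector), "eth0-TxRx-0" and "eth0-TxRx-1"; the device
 * name shown is illustrative.
 */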

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
	                                    adapter->num_rx_queues);
}
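
/*
 * Vector budget example: with rss_queues = 4, no VFs and
 * IGB_FLAG_QUEUE_PAIRS clear, the code above requests
 * 4 rx + 4 tx + 1 link = 9 MSI-X vectors; with queue pairing in effect
 * each vector serves a Tx/Rx pair and the request drops to 4 + 1 = 5.
 */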

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
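
/*
 * Mapping example: with 2 rx and 2 tx queues but only 2 q_vectors, the
 * else-branch above pairs tx0/rx0 on vector 0 and tx1/rx1 on vector 1;
 * with 4 q_vectors available, every ring gets a dedicated vector instead.
 */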

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}
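
/*
 * The fallback order in igb_request_irq() is thus MSI-X (one vector per
 * q_vector plus one for link/other causes), then MSI with a single
 * combined q_vector, then legacy shared INTx; each step tears down and
 * reallocates a single-queue setup before retrying.
 */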

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * We need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers, and clearing their bits can cause
	 * issues for the VF drivers, so we only clear the bits we set.
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware, DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for MTUs greater than 9K.
	 * CTRL.RST is required for the change to take effect.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* the upper 16 bits hold the Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* the lower 16 bits hold the Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * Tx packet, but does not include the Ethernet FCS because
		 * hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
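		/* Illustrative arithmetic (an example, not a datasheet
		 * value): with a 9000 byte MTU, max_frame_size is 9018, so
		 * min_tx_space = (9018 + 16 - 4) * 2 = 18060, aligned up to
		 * 18432 and shifted down to 18 KB; min_rx_space = 9018,
		 * aligned up to 9216, i.e. 9 KB. */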

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
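	/* Worked example, assuming a 34 KB Rx PBA and 1522 byte frames:
	 * min(34816 * 9 / 10, 34816 - 2 * 1522) = min(31334, 31772), so
	 * hwm = 31334 and high_water = 31334 & 0xFFF0 = 31328 bytes, with
	 * low_water 16 bytes below that. */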
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
	}
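
	/* Note: pci_using_dac records whether the 64-bit DMA masks were
	 * accepted above; it is consulted further down to decide whether
	 * NETIF_F_HIGHDMA can be advertised on the netdev. */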

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter.
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type == e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		  "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		  "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * prevent it from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
					   sizeof(struct vf_data_storage),
					   GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev,
				"Unable to allocate memory for VF Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;
		dev_info(&pdev->dev, "%d vfs allocated\n",
			 adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
	}
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync advances the system timer by 8ns every
		 * 8ns and the value cannot be shifted. Instead we need to
		 * shift the registers to generate a 64bit timer value. As a
		 * result SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be
		 * shifted by 24 in order to generate a larger value for
		 * synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits. As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));
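		/* In other words (a reading of the code above, not of the
		 * datasheet): every 16ns period the hardware adds 16 << 19
		 * to SYSTIM, so the counter advances 2^19 counts per
		 * nanosecond, and the timecounter (mult == 1, shift == 19)
		 * divides the count back down into nanoseconds. */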

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
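	/* With queue pairing, one q_vector (and thus one MSI-X vector)
	 * services a Tx/Rx ring pair instead of a single ring; e.g. 8 RSS
	 * queues then need 8 queue vectors rather than 16. */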

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_init_hw_timer(adapter);
	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
		int r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
	}
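	/* The loop above maps every possible queue index (up to
	 * IGB_ABS_MAX_TX_QUEUES) onto a real ring round-robin; e.g. with
	 * 4 Tx queues, table slots 0-3 and 4-7 both reference rings 0-3. */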
	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

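	/* Pack the descriptor prefetch (PTHRESH), host (HTHRESH) and
	 * write-back (WTHRESH) thresholds into their TXDCTL byte fields;
	 * the shifts below position each threshold within the register. */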
	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8 bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}
2697
Alexander Duycka99955f2009-11-12 18:37:19 +00002698 num_rx_queues = adapter->rss_queues;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002699
2700 if (adapter->vfs_allocated_count) {
2701		/* 82575 and 82576 support 2 RSS queues for VMDq */
2702 switch (hw->mac.type) {
Alexander Duyckd2ba2ed2010-03-22 14:08:06 +00002703 case e1000_i350:
Alexander Duyck55cac242009-11-19 12:42:21 +00002704 case e1000_82580:
2705 num_rx_queues = 1;
2706 shift = 0;
2707 break;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002708 case e1000_82576:
2709 shift = 3;
2710 num_rx_queues = 2;
2711 break;
2712 case e1000_82575:
2713 shift = 2;
2714 shift2 = 6;
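			/* fall through */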
2715 default:
2716 break;
2717 }
2718 } else {
2719 if (hw->mac.type == e1000_82575)
2720 shift = 6;
2721 }
2722
2723 for (j = 0; j < (32 * 4); j++) {
2724 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2725 if (shift2)
2726 reta.bytes[j & 3] |= num_rx_queues << shift2;
2727 if ((j & 3) == 3)
2728 wr32(E1000_RETA(j >> 2), reta.dword);
2729 }
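	/*
	 * Sketch of the indirection table built above: the 128 RETA
	 * entries are packed four to a register write.  In the 82576
	 * VMDq case (num_rx_queues = 2, shift = 3) the byte pattern
	 * alternates 0x00, 0x08, so each of the 32 RETA registers is
	 * written with 0x08000800 on a little-endian host.
	 */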
2730
2731 /*
2732 * Disable raw packet checksumming so that RSS hash is placed in
2733 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2734 * offloads as they are enabled by default
2735 */
2736 rxcsum = rd32(E1000_RXCSUM);
2737 rxcsum |= E1000_RXCSUM_PCSD;
2738
2739 if (adapter->hw.mac.type >= e1000_82576)
2740 /* Enable Receive Checksum Offload for SCTP */
2741 rxcsum |= E1000_RXCSUM_CRCOFL;
2742
2743 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2744 wr32(E1000_RXCSUM, rxcsum);
2745
2746 /* If VMDq is enabled then we set the appropriate mode for that, else
2747 * we default to RSS so that an RSS hash is calculated per packet even
2748 * if we are only using one queue */
2749 if (adapter->vfs_allocated_count) {
2750 if (hw->mac.type > e1000_82575) {
2751 /* Set the default pool for the PF's first queue */
2752 u32 vtctl = rd32(E1000_VT_CTL);
2753 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2754 E1000_VT_CTL_DISABLE_DEF_POOL);
2755 vtctl |= adapter->vfs_allocated_count <<
2756 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2757 wr32(E1000_VT_CTL, vtctl);
2758 }
Alexander Duycka99955f2009-11-12 18:37:19 +00002759 if (adapter->rss_queues > 1)
Alexander Duyck06cf2662009-10-27 15:53:25 +00002760 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2761 else
2762 mrqc = E1000_MRQC_ENABLE_VMDQ;
2763 } else {
2764 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2765 }
2766 igb_vmm_control(adapter);
2767
Alexander Duyck4478a9c2010-07-01 20:01:05 +00002768 /*
2769 * Generate RSS hash based on TCP port numbers and/or
2770 * IPv4/v6 src and dst addresses since UDP cannot be
2771 * hashed reliably due to IP fragmentation
2772 */
2773 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2774 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2775 E1000_MRQC_RSS_FIELD_IPV6 |
2776 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2777 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
Alexander Duyck06cf2662009-10-27 15:53:25 +00002778
2779 wr32(E1000_MRQC, mrqc);
2780}
2781
2782/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002783 * igb_setup_rctl - configure the receive control registers
2784 * @adapter: Board private structure
2785 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002786void igb_setup_rctl(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08002787{
2788 struct e1000_hw *hw = &adapter->hw;
2789 u32 rctl;
Auke Kok9d5c8242008-01-24 02:22:38 -08002790
2791 rctl = rd32(E1000_RCTL);
2792
2793 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
Alexander Duyck69d728b2008-11-25 01:04:03 -08002794 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
Auke Kok9d5c8242008-01-24 02:22:38 -08002795
Alexander Duyck69d728b2008-11-25 01:04:03 -08002796 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
Alexander Duyck28b07592009-02-06 23:20:31 +00002797 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
Auke Kok9d5c8242008-01-24 02:22:38 -08002798
Auke Kok87cb7e82008-07-08 15:08:29 -07002799 /*
2800 * enable stripping of CRC. It's unlikely this will break BMC
2801 * redirection as it did with e1000. Newer features require
2802 * that the HW strips the CRC.
Alexander Duyck73cd78f2009-02-12 18:16:59 +00002803 */
Auke Kok87cb7e82008-07-08 15:08:29 -07002804 rctl |= E1000_RCTL_SECRC;
Auke Kok9d5c8242008-01-24 02:22:38 -08002805
Alexander Duyck559e9c42009-10-27 23:52:50 +00002806 /* disable store bad packets and clear size bits. */
Alexander Duyckec54d7d2009-01-31 00:52:57 -08002807 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
Auke Kok9d5c8242008-01-24 02:22:38 -08002808
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00002809 /* enable LPE to prevent packets larger than max_frame_size */
2810 rctl |= E1000_RCTL_LPE;
Auke Kok9d5c8242008-01-24 02:22:38 -08002811
Alexander Duyck952f72a2009-10-27 15:51:07 +00002812 /* disable queue 0 to prevent tail write w/o re-config */
2813 wr32(E1000_RXDCTL(0), 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08002814
Alexander Duycke1739522009-02-19 20:39:44 -08002815 /* Attention!!! For SR-IOV PF driver operations you must enable
2816	 * queue drop for all VF and PF queues to prevent head-of-line blocking
2817	 * if an untrusted VF does not provide descriptors to hardware.
2818 */
2819 if (adapter->vfs_allocated_count) {
Alexander Duycke1739522009-02-19 20:39:44 -08002820 /* set all queue drop enable bits */
2821 wr32(E1000_QDE, ALL_QUEUES);
Alexander Duycke1739522009-02-19 20:39:44 -08002822 }
2823
Auke Kok9d5c8242008-01-24 02:22:38 -08002824 wr32(E1000_RCTL, rctl);
2825}
2826
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002827static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2828 int vfn)
2829{
2830 struct e1000_hw *hw = &adapter->hw;
2831 u32 vmolr;
2832
2833	/* if it isn't the PF, check to see if VFs are enabled and
2834	 * increase the size to support VLAN tags */
2835 if (vfn < adapter->vfs_allocated_count &&
2836 adapter->vf_data[vfn].vlans_enabled)
2837 size += VLAN_TAG_SIZE;
2838
2839 vmolr = rd32(E1000_VMOLR(vfn));
2840 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2841 vmolr |= size | E1000_VMOLR_LPE;
2842 wr32(E1000_VMOLR(vfn), vmolr);
2843
2844 return 0;
2845}
2846
Auke Kok9d5c8242008-01-24 02:22:38 -08002847/**
Alexander Duycke1739522009-02-19 20:39:44 -08002848 * igb_rlpml_set - set maximum receive packet size
2849 * @adapter: board private structure
2850 *
2851 * Configure maximum receivable packet size.
2852 **/
2853static void igb_rlpml_set(struct igb_adapter *adapter)
2854{
2855 u32 max_frame_size = adapter->max_frame_size;
2856 struct e1000_hw *hw = &adapter->hw;
2857 u16 pf_id = adapter->vfs_allocated_count;
2858
2859 if (adapter->vlgrp)
2860 max_frame_size += VLAN_TAG_SIZE;
2861
2862	/* if VFs are enabled, set RLPML to the largest possible request
2863	 * size and set the VMOLR RLPML to the size we need */
2864 if (pf_id) {
2865 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002866 max_frame_size = MAX_JUMBO_FRAME_SIZE;
Alexander Duycke1739522009-02-19 20:39:44 -08002867 }
2868
2869 wr32(E1000_RLPML, max_frame_size);
2870}
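/*
 * Worked example for the sizing above, assuming a standard 1500-byte
 * MTU: max_frame_size is 1518 (payload plus Ethernet header and FCS),
 * or 1522 once a VLAN group is registered.  When VFs are enabled the
 * PF instead programs E1000_RLPML with MAX_JUMBO_FRAME_SIZE and lets
 * the per-pool VMOLR.RLPML field enforce the real limit.
 */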
2871
Williams, Mitch A8151d292010-02-10 01:44:24 +00002872static inline void igb_set_vmolr(struct igb_adapter *adapter,
2873 int vfn, bool aupe)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002874{
2875 struct e1000_hw *hw = &adapter->hw;
2876 u32 vmolr;
2877
2878 /*
2879	 * This register exists only on 82576 and newer, so on older hardware
2880	 * we should exit and do nothing
2881 */
2882 if (hw->mac.type < e1000_82576)
2883 return;
2884
2885 vmolr = rd32(E1000_VMOLR(vfn));
Williams, Mitch A8151d292010-02-10 01:44:24 +00002886 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2887 if (aupe)
2888 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2889 else
2890 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002891
2892 /* clear all bits that might not be set */
2893 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2894
Alexander Duycka99955f2009-11-12 18:37:19 +00002895 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002896 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2897 /*
2898 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2899 * multicast packets
2900 */
2901 if (vfn <= adapter->vfs_allocated_count)
2902 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2903
2904 wr32(E1000_VMOLR(vfn), vmolr);
2905}
2906
Alexander Duycke1739522009-02-19 20:39:44 -08002907/**
Alexander Duyck85b430b2009-10-27 15:50:29 +00002908 * igb_configure_rx_ring - Configure a receive ring after Reset
2909 * @adapter: board private structure
2910 * @ring: receive ring to be configured
2911 *
2912 * Configure the Rx unit of the MAC after a reset.
2913 **/
Alexander Duyckd7ee5b32009-10-27 15:54:23 +00002914void igb_configure_rx_ring(struct igb_adapter *adapter,
2915 struct igb_ring *ring)
Alexander Duyck85b430b2009-10-27 15:50:29 +00002916{
2917 struct e1000_hw *hw = &adapter->hw;
2918 u64 rdba = ring->dma;
2919 int reg_idx = ring->reg_idx;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002920 u32 srrctl, rxdctl;
Alexander Duyck85b430b2009-10-27 15:50:29 +00002921
2922 /* disable the queue */
2923 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2924 wr32(E1000_RXDCTL(reg_idx),
2925 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2926
2927 /* Set DMA base address registers */
2928 wr32(E1000_RDBAL(reg_idx),
2929 rdba & 0x00000000ffffffffULL);
2930 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2931 wr32(E1000_RDLEN(reg_idx),
2932 ring->count * sizeof(union e1000_adv_rx_desc));
2933
2934 /* initialize head and tail */
Alexander Duyckfce99e32009-10-27 15:51:27 +00002935 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2936 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2937 writel(0, ring->head);
2938 writel(0, ring->tail);
Alexander Duyck85b430b2009-10-27 15:50:29 +00002939
Alexander Duyck952f72a2009-10-27 15:51:07 +00002940 /* set descriptor configuration */
Alexander Duyck4c844852009-10-27 15:52:07 +00002941 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2942 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
Alexander Duyck952f72a2009-10-27 15:51:07 +00002943 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2944#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2945 srrctl |= IGB_RXBUFFER_16384 >>
2946 E1000_SRRCTL_BSIZEPKT_SHIFT;
2947#else
2948 srrctl |= (PAGE_SIZE / 2) >>
2949 E1000_SRRCTL_BSIZEPKT_SHIFT;
2950#endif
2951 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2952 } else {
Alexander Duyck4c844852009-10-27 15:52:07 +00002953 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
Alexander Duyck952f72a2009-10-27 15:51:07 +00002954 E1000_SRRCTL_BSIZEPKT_SHIFT;
2955 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2956 }
Nick Nunley757b77e2010-03-26 11:36:47 +00002957 if (hw->mac.type == e1000_82580)
2958 srrctl |= E1000_SRRCTL_TIMESTAMP;
Nick Nunleye6bdb6f2010-02-17 01:03:38 +00002959 /* Only set Drop Enable if we are supporting multiple queues */
2960 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2961 srrctl |= E1000_SRRCTL_DROP_EN;
Alexander Duyck952f72a2009-10-27 15:51:07 +00002962
2963 wr32(E1000_SRRCTL(reg_idx), srrctl);
2964
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002965 /* set filtering for VMDQ pools */
Williams, Mitch A8151d292010-02-10 01:44:24 +00002966 igb_set_vmolr(adapter, reg_idx & 0x7, true);
Alexander Duyck7d5753f2009-10-27 23:47:16 +00002967
Alexander Duyck85b430b2009-10-27 15:50:29 +00002968 /* enable receive descriptor fetching */
2969 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2970 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2971 rxdctl &= 0xFFF00000;
2972 rxdctl |= IGB_RX_PTHRESH;
2973 rxdctl |= IGB_RX_HTHRESH << 8;
2974 rxdctl |= IGB_RX_WTHRESH << 16;
2975 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2976}
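/*
 * Example of the SRRCTL split above: with 2048-byte buffers the
 * one-buffer path programs a packet-buffer size of 2 (the field is in
 * 1 KB units, hence E1000_SRRCTL_BSIZEPKT_SHIFT), while the header-
 * split path caps the half-page packet buffer at 16 KB and places the
 * aligned rx_buffer_len in the header-size field instead.
 */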
2977
2978/**
Auke Kok9d5c8242008-01-24 02:22:38 -08002979 * igb_configure_rx - Configure receive Unit after Reset
2980 * @adapter: board private structure
2981 *
2982 * Configure the Rx unit of the MAC after a reset.
2983 **/
2984static void igb_configure_rx(struct igb_adapter *adapter)
2985{
Hannes Eder91075842009-02-18 19:36:04 -08002986 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08002987
Alexander Duyck68d480c2009-10-05 06:33:08 +00002988 /* set UTA to appropriate mode */
2989 igb_set_uta(adapter);
2990
Alexander Duyck26ad9172009-10-05 06:32:49 +00002991 /* set the correct pool for the PF default MAC address in entry 0 */
2992 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2993 adapter->vfs_allocated_count);
2994
Alexander Duyck06cf2662009-10-27 15:53:25 +00002995 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2996 * the Base and Length of the Rx Descriptor Ring */
2997 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00002998 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08002999}
3000
3001/**
3002 * igb_free_tx_resources - Free Tx Resources per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003003 * @tx_ring: Tx descriptor ring for a specific queue
3004 *
3005 * Free all transmit software resources
3006 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003007void igb_free_tx_resources(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003008{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003009 igb_clean_tx_ring(tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003010
3011 vfree(tx_ring->buffer_info);
3012 tx_ring->buffer_info = NULL;
3013
Alexander Duyck439705e2009-10-27 23:49:20 +00003014 /* if not set, then don't free */
3015 if (!tx_ring->desc)
3016 return;
3017
Alexander Duyck59d71982010-04-27 13:09:25 +00003018 dma_free_coherent(tx_ring->dev, tx_ring->size,
3019 tx_ring->desc, tx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003020
3021 tx_ring->desc = NULL;
3022}
3023
3024/**
3025 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3026 * @adapter: board private structure
3027 *
3028 * Free all transmit software resources
3029 **/
3030static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3031{
3032 int i;
3033
3034 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003035 igb_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003036}
3037
Alexander Duyckb1a436c2009-10-27 15:54:43 +00003038void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
3039 struct igb_buffer *buffer_info)
Auke Kok9d5c8242008-01-24 02:22:38 -08003040{
Alexander Duyck6366ad32009-12-02 16:47:18 +00003041 if (buffer_info->dma) {
3042 if (buffer_info->mapped_as_page)
Alexander Duyck59d71982010-04-27 13:09:25 +00003043 dma_unmap_page(tx_ring->dev,
Alexander Duyck6366ad32009-12-02 16:47:18 +00003044 buffer_info->dma,
3045 buffer_info->length,
Alexander Duyck59d71982010-04-27 13:09:25 +00003046 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003047 else
Alexander Duyck59d71982010-04-27 13:09:25 +00003048 dma_unmap_single(tx_ring->dev,
Alexander Duyck6366ad32009-12-02 16:47:18 +00003049 buffer_info->dma,
3050 buffer_info->length,
Alexander Duyck59d71982010-04-27 13:09:25 +00003051 DMA_TO_DEVICE);
Alexander Duyck6366ad32009-12-02 16:47:18 +00003052 buffer_info->dma = 0;
3053 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003054 if (buffer_info->skb) {
3055 dev_kfree_skb_any(buffer_info->skb);
3056 buffer_info->skb = NULL;
3057 }
3058 buffer_info->time_stamp = 0;
Alexander Duyck6366ad32009-12-02 16:47:18 +00003059 buffer_info->length = 0;
3060 buffer_info->next_to_watch = 0;
3061 buffer_info->mapped_as_page = false;
Auke Kok9d5c8242008-01-24 02:22:38 -08003062}
3063
3064/**
3065 * igb_clean_tx_ring - Free Tx Buffers
Auke Kok9d5c8242008-01-24 02:22:38 -08003066 * @tx_ring: ring to be cleaned
3067 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003068static void igb_clean_tx_ring(struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003069{
3070 struct igb_buffer *buffer_info;
3071 unsigned long size;
3072 unsigned int i;
3073
3074 if (!tx_ring->buffer_info)
3075 return;
3076 /* Free all the Tx ring sk_buffs */
3077
3078 for (i = 0; i < tx_ring->count; i++) {
3079 buffer_info = &tx_ring->buffer_info[i];
Alexander Duyck80785292009-10-27 15:51:47 +00003080 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
Auke Kok9d5c8242008-01-24 02:22:38 -08003081 }
3082
3083 size = sizeof(struct igb_buffer) * tx_ring->count;
3084 memset(tx_ring->buffer_info, 0, size);
3085
3086 /* Zero out the descriptor ring */
Auke Kok9d5c8242008-01-24 02:22:38 -08003087 memset(tx_ring->desc, 0, tx_ring->size);
3088
3089 tx_ring->next_to_use = 0;
3090 tx_ring->next_to_clean = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003091}
3092
3093/**
3094 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3095 * @adapter: board private structure
3096 **/
3097static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3098{
3099 int i;
3100
3101 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003102 igb_clean_tx_ring(adapter->tx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003103}
3104
3105/**
3106 * igb_free_rx_resources - Free Rx Resources
Auke Kok9d5c8242008-01-24 02:22:38 -08003107 * @rx_ring: ring to clean the resources from
3108 *
3109 * Free all receive software resources
3110 **/
Alexander Duyck68fd9912008-11-20 00:48:10 -08003111void igb_free_rx_resources(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003112{
Mitch Williams3b644cf2008-06-27 10:59:48 -07003113 igb_clean_rx_ring(rx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08003114
3115 vfree(rx_ring->buffer_info);
3116 rx_ring->buffer_info = NULL;
3117
Alexander Duyck439705e2009-10-27 23:49:20 +00003118 /* if not set, then don't free */
3119 if (!rx_ring->desc)
3120 return;
3121
Alexander Duyck59d71982010-04-27 13:09:25 +00003122 dma_free_coherent(rx_ring->dev, rx_ring->size,
3123 rx_ring->desc, rx_ring->dma);
Auke Kok9d5c8242008-01-24 02:22:38 -08003124
3125 rx_ring->desc = NULL;
3126}
3127
3128/**
3129 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3130 * @adapter: board private structure
3131 *
3132 * Free all receive software resources
3133 **/
3134static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3135{
3136 int i;
3137
3138 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003139 igb_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003140}
3141
3142/**
3143 * igb_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9d5c8242008-01-24 02:22:38 -08003144 * @rx_ring: ring to free buffers from
3145 **/
Mitch Williams3b644cf2008-06-27 10:59:48 -07003146static void igb_clean_rx_ring(struct igb_ring *rx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08003147{
3148 struct igb_buffer *buffer_info;
Auke Kok9d5c8242008-01-24 02:22:38 -08003149 unsigned long size;
3150 unsigned int i;
3151
3152 if (!rx_ring->buffer_info)
3153 return;
Alexander Duyck439705e2009-10-27 23:49:20 +00003154
Auke Kok9d5c8242008-01-24 02:22:38 -08003155 /* Free all the Rx ring sk_buffs */
3156 for (i = 0; i < rx_ring->count; i++) {
3157 buffer_info = &rx_ring->buffer_info[i];
3158 if (buffer_info->dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003159 dma_unmap_single(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003160 buffer_info->dma,
Alexander Duyck4c844852009-10-27 15:52:07 +00003161 rx_ring->rx_buffer_len,
Alexander Duyck59d71982010-04-27 13:09:25 +00003162 DMA_FROM_DEVICE);
Auke Kok9d5c8242008-01-24 02:22:38 -08003163 buffer_info->dma = 0;
3164 }
3165
3166 if (buffer_info->skb) {
3167 dev_kfree_skb(buffer_info->skb);
3168 buffer_info->skb = NULL;
3169 }
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003170 if (buffer_info->page_dma) {
Alexander Duyck59d71982010-04-27 13:09:25 +00003171 dma_unmap_page(rx_ring->dev,
Alexander Duyck80785292009-10-27 15:51:47 +00003172 buffer_info->page_dma,
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003173 PAGE_SIZE / 2,
Alexander Duyck59d71982010-04-27 13:09:25 +00003174 DMA_FROM_DEVICE);
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00003175 buffer_info->page_dma = 0;
3176 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003177 if (buffer_info->page) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003178 put_page(buffer_info->page);
3179 buffer_info->page = NULL;
Alexander Duyckbf36c1a2008-07-08 15:11:40 -07003180 buffer_info->page_offset = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003181 }
3182 }
3183
Auke Kok9d5c8242008-01-24 02:22:38 -08003184 size = sizeof(struct igb_buffer) * rx_ring->count;
3185 memset(rx_ring->buffer_info, 0, size);
3186
3187 /* Zero out the descriptor ring */
3188 memset(rx_ring->desc, 0, rx_ring->size);
3189
3190 rx_ring->next_to_clean = 0;
3191 rx_ring->next_to_use = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08003192}
3193
3194/**
3195 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3196 * @adapter: board private structure
3197 **/
3198static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3199{
3200 int i;
3201
3202 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00003203 igb_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9d5c8242008-01-24 02:22:38 -08003204}
3205
3206/**
3207 * igb_set_mac - Change the Ethernet Address of the NIC
3208 * @netdev: network interface device structure
3209 * @p: pointer to an address structure
3210 *
3211 * Returns 0 on success, negative on failure
3212 **/
3213static int igb_set_mac(struct net_device *netdev, void *p)
3214{
3215 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck28b07592009-02-06 23:20:31 +00003216 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003217 struct sockaddr *addr = p;
3218
3219 if (!is_valid_ether_addr(addr->sa_data))
3220 return -EADDRNOTAVAIL;
3221
3222 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Alexander Duyck28b07592009-02-06 23:20:31 +00003223 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9d5c8242008-01-24 02:22:38 -08003224
Alexander Duyck26ad9172009-10-05 06:32:49 +00003225 /* set the correct pool for the new PF MAC address in entry 0 */
3226 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3227 adapter->vfs_allocated_count);
Alexander Duycke1739522009-02-19 20:39:44 -08003228
Auke Kok9d5c8242008-01-24 02:22:38 -08003229 return 0;
3230}
3231
3232/**
Alexander Duyck68d480c2009-10-05 06:33:08 +00003233 * igb_write_mc_addr_list - write multicast addresses to MTA
3234 * @netdev: network interface device structure
3235 *
3236 * Writes multicast address list to the MTA hash table.
3237 * Returns: -ENOMEM on failure
3238 * 0 on no addresses written
3239 * X on writing X addresses to MTA
3240 **/
3241static int igb_write_mc_addr_list(struct net_device *netdev)
3242{
3243 struct igb_adapter *adapter = netdev_priv(netdev);
3244 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003245 struct netdev_hw_addr *ha;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003246 u8 *mta_list;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003247 int i;
3248
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003249 if (netdev_mc_empty(netdev)) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003250 /* nothing to program, so clear mc list */
3251 igb_update_mc_addr_list(hw, NULL, 0);
3252 igb_restore_vf_multicasts(adapter);
3253 return 0;
3254 }
3255
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003256 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003257 if (!mta_list)
3258 return -ENOMEM;
3259
Alexander Duyck68d480c2009-10-05 06:33:08 +00003260 /* The shared function expects a packed array of only addresses. */
Jiri Pirko48e2f182010-02-22 09:22:26 +00003261 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00003262 netdev_for_each_mc_addr(ha, netdev)
3263 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003264
Alexander Duyck68d480c2009-10-05 06:33:08 +00003265 igb_update_mc_addr_list(hw, mta_list, i);
3266 kfree(mta_list);
3267
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00003268 return netdev_mc_count(netdev);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003269}
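/*
 * Note on the allocation above: the shared code expects the multicast
 * addresses packed back to back, six bytes (ETH_ALEN) apiece, so e.g.
 * three addresses occupy an 18-byte scratch buffer that is freed as
 * soon as the MTA hash registers have been updated.
 */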
3270
3271/**
3272 * igb_write_uc_addr_list - write unicast addresses to RAR table
3273 * @netdev: network interface device structure
3274 *
3275 * Writes unicast address list to the RAR table.
3276 * Returns: -ENOMEM on failure/insufficient address space
3277 * 0 on no addresses written
3278 * X on writing X addresses to the RAR table
3279 **/
3280static int igb_write_uc_addr_list(struct net_device *netdev)
3281{
3282 struct igb_adapter *adapter = netdev_priv(netdev);
3283 struct e1000_hw *hw = &adapter->hw;
3284 unsigned int vfn = adapter->vfs_allocated_count;
3285 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3286 int count = 0;
3287
3288 /* return ENOMEM indicating insufficient memory for addresses */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003289 if (netdev_uc_count(netdev) > rar_entries)
Alexander Duyck68d480c2009-10-05 06:33:08 +00003290 return -ENOMEM;
3291
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003292 if (!netdev_uc_empty(netdev) && rar_entries) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003293 struct netdev_hw_addr *ha;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003294
3295 netdev_for_each_uc_addr(ha, netdev) {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003296 if (!rar_entries)
3297 break;
3298 igb_rar_set_qsel(adapter, ha->addr,
3299 rar_entries--,
3300 vfn);
3301 count++;
3302 }
3303 }
3304 /* write the addresses in reverse order to avoid write combining */
3305 for (; rar_entries > 0 ; rar_entries--) {
3306 wr32(E1000_RAH(rar_entries), 0);
3307 wr32(E1000_RAL(rar_entries), 0);
3308 }
3309 wrfl();
3310
3311 return count;
3312}
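/*
 * Worked example of the budget above, assuming a MAC with 24 receive
 * address registers and two VFs: vfn = 2, so three entries are held
 * back (the PF default MAC in entry 0 plus one per VF) and up to 21
 * secondary unicast addresses can be filtered in hardware before the
 * caller has to fall back to unicast promiscuous mode.
 */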
3313
3314/**
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003315 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Auke Kok9d5c8242008-01-24 02:22:38 -08003316 * @netdev: network interface device structure
3317 *
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003318 * The set_rx_mode entry point is called whenever the unicast or multicast
3319 * address lists or the network interface flags are updated. This routine is
3320 * responsible for configuring the hardware for proper unicast, multicast,
Auke Kok9d5c8242008-01-24 02:22:38 -08003321 * promiscuous mode, and all-multi behavior.
3322 **/
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003323static void igb_set_rx_mode(struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08003324{
3325 struct igb_adapter *adapter = netdev_priv(netdev);
3326 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003327 unsigned int vfn = adapter->vfs_allocated_count;
3328 u32 rctl, vmolr = 0;
3329 int count;
Auke Kok9d5c8242008-01-24 02:22:38 -08003330
3331 /* Check for Promiscuous and All Multicast modes */
Auke Kok9d5c8242008-01-24 02:22:38 -08003332 rctl = rd32(E1000_RCTL);
3333
Alexander Duyck68d480c2009-10-05 06:33:08 +00003334	/* clear the affected bits */
3335 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3336
Patrick McHardy746b9f02008-07-16 20:15:45 -07003337 if (netdev->flags & IFF_PROMISC) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003338 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Alexander Duyck68d480c2009-10-05 06:33:08 +00003339 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
Patrick McHardy746b9f02008-07-16 20:15:45 -07003340 } else {
Alexander Duyck68d480c2009-10-05 06:33:08 +00003341 if (netdev->flags & IFF_ALLMULTI) {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003342 rctl |= E1000_RCTL_MPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003343 vmolr |= E1000_VMOLR_MPME;
3344 } else {
3345 /*
3346		 * Write addresses to the MTA; if the attempt fails,
3347		 * just turn on promiscuous mode so that we can at
3348		 * least receive multicast traffic
3349 */
3350 count = igb_write_mc_addr_list(netdev);
3351 if (count < 0) {
3352 rctl |= E1000_RCTL_MPE;
3353 vmolr |= E1000_VMOLR_MPME;
3354 } else if (count) {
3355 vmolr |= E1000_VMOLR_ROMPE;
3356 }
3357 }
3358 /*
3359		 * Write addresses to the available RAR registers; if there is
3360		 * not sufficient space to store all of them, enable
3361		 * unicast promiscuous mode
3362 */
3363 count = igb_write_uc_addr_list(netdev);
3364 if (count < 0) {
Alexander Duyckff41f8d2009-09-03 14:48:56 +00003365 rctl |= E1000_RCTL_UPE;
Alexander Duyck68d480c2009-10-05 06:33:08 +00003366 vmolr |= E1000_VMOLR_ROPE;
3367 }
Patrick McHardy78ed11a2008-07-16 20:16:14 -07003368 rctl |= E1000_RCTL_VFE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003369 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003370 wr32(E1000_RCTL, rctl);
3371
Alexander Duyck68d480c2009-10-05 06:33:08 +00003372 /*
3373 * In order to support SR-IOV and eventually VMDq it is necessary to set
3374 * the VMOLR to enable the appropriate modes. Without this workaround
3375 * we will have issues with VLAN tag stripping not being done for frames
3376 * that are only arriving because we are the default pool
3377 */
3378 if (hw->mac.type < e1000_82576)
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003379 return;
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003380
Alexander Duyck68d480c2009-10-05 06:33:08 +00003381 vmolr |= rd32(E1000_VMOLR(vfn)) &
3382 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3383 wr32(E1000_VMOLR(vfn), vmolr);
Alexander Duyck28fc06f2009-07-23 18:08:54 +00003384 igb_restore_vf_multicasts(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003385}
3386
Greg Rose13800462010-11-06 02:08:26 +00003387static void igb_check_wvbr(struct igb_adapter *adapter)
3388{
3389 struct e1000_hw *hw = &adapter->hw;
3390 u32 wvbr = 0;
3391
3392 switch (hw->mac.type) {
3393 case e1000_82576:
3394 case e1000_i350:
3395 if (!(wvbr = rd32(E1000_WVBR)))
3396 return;
3397 break;
3398 default:
3399 break;
3400 }
3401
3402 adapter->wvbr |= wvbr;
3403}
3404
3405#define IGB_STAGGERED_QUEUE_OFFSET 8
3406
3407static void igb_spoof_check(struct igb_adapter *adapter)
3408{
3409 int j;
3410
3411 if (!adapter->wvbr)
3412 return;
3413
3414	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3415 if (adapter->wvbr & (1 << j) ||
3416 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3417 dev_warn(&adapter->pdev->dev,
3418 "Spoof event(s) detected on VF %d\n", j);
3419 adapter->wvbr &=
3420 ~((1 << j) |
3421 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3422 }
3423 }
3424}
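/*
 * Sketch of the WVBR bitmap decoded above: bit j reports a spoofed
 * frame from VF j's first queue set and bit (j + 8) its staggered
 * second set, so a latched value of 0x00000101 means VF 0 triggered
 * spoof events on both of its queue pairs.
 */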
3425
Auke Kok9d5c8242008-01-24 02:22:38 -08003426/* Need to wait a few seconds after link up to get diagnostic information from
3427 * the phy */
3428static void igb_update_phy_info(unsigned long data)
3429{
3430 struct igb_adapter *adapter = (struct igb_adapter *) data;
Alexander Duyckf5f4cf02008-11-21 21:30:24 -08003431 igb_get_phy_info(&adapter->hw);
Auke Kok9d5c8242008-01-24 02:22:38 -08003432}
3433
3434/**
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003435 * igb_has_link - check shared code for link and determine up/down
3436 * @adapter: pointer to driver private info
3437 **/
Nick Nunley31455352010-02-17 01:01:21 +00003438bool igb_has_link(struct igb_adapter *adapter)
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003439{
3440 struct e1000_hw *hw = &adapter->hw;
3441 bool link_active = false;
3442 s32 ret_val = 0;
3443
3444 /* get_link_status is set on LSC (link status) interrupt or
3445 * rx sequence error interrupt. get_link_status will stay
3446 * false until the e1000_check_for_link establishes link
3447 * for copper adapters ONLY
3448 */
3449 switch (hw->phy.media_type) {
3450 case e1000_media_type_copper:
3451 if (hw->mac.get_link_status) {
3452 ret_val = hw->mac.ops.check_for_link(hw);
3453 link_active = !hw->mac.get_link_status;
3454 } else {
3455 link_active = true;
3456 }
3457 break;
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003458 case e1000_media_type_internal_serdes:
3459 ret_val = hw->mac.ops.check_for_link(hw);
3460 link_active = hw->mac.serdes_has_link;
3461 break;
3462 default:
3463 case e1000_media_type_unknown:
3464 break;
3465 }
3466
3467 return link_active;
3468}
3469
3470/**
Auke Kok9d5c8242008-01-24 02:22:38 -08003471 * igb_watchdog - Timer Call-back
3472 * @data: pointer to adapter cast into an unsigned long
3473 **/
3474static void igb_watchdog(unsigned long data)
3475{
3476 struct igb_adapter *adapter = (struct igb_adapter *)data;
3477 /* Do the rest outside of interrupt context */
3478 schedule_work(&adapter->watchdog_task);
3479}
3480
3481static void igb_watchdog_task(struct work_struct *work)
3482{
3483 struct igb_adapter *adapter = container_of(work,
Alexander Duyck559e9c42009-10-27 23:52:50 +00003484 struct igb_adapter,
3485 watchdog_task);
Auke Kok9d5c8242008-01-24 02:22:38 -08003486 struct e1000_hw *hw = &adapter->hw;
Auke Kok9d5c8242008-01-24 02:22:38 -08003487 struct net_device *netdev = adapter->netdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003488 u32 link;
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003489 int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003490
Alexander Duyck4d6b7252009-02-06 23:16:24 +00003491 link = igb_has_link(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08003492 if (link) {
3493 if (!netif_carrier_ok(netdev)) {
3494 u32 ctrl;
Alexander Duyck330a6d62009-10-27 23:51:35 +00003495 hw->mac.ops.get_speed_and_duplex(hw,
3496 &adapter->link_speed,
3497 &adapter->link_duplex);
Auke Kok9d5c8242008-01-24 02:22:38 -08003498
3499 ctrl = rd32(E1000_CTRL);
Alexander Duyck527d47c2008-11-27 00:21:39 -08003500			/* Link status message must follow this format */
3501 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
Auke Kok9d5c8242008-01-24 02:22:38 -08003502 "Flow Control: %s\n",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003503 netdev->name,
3504 adapter->link_speed,
3505 adapter->link_duplex == FULL_DUPLEX ?
Auke Kok9d5c8242008-01-24 02:22:38 -08003506 "Full Duplex" : "Half Duplex",
Alexander Duyck559e9c42009-10-27 23:52:50 +00003507 ((ctrl & E1000_CTRL_TFCE) &&
3508 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3509 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3510 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
Auke Kok9d5c8242008-01-24 02:22:38 -08003511
Emil Tantilovd07f3e32010-03-23 18:34:57 +00003512 /* adjust timeout factor according to speed/duplex */
Auke Kok9d5c8242008-01-24 02:22:38 -08003513 adapter->tx_timeout_factor = 1;
3514 switch (adapter->link_speed) {
3515 case SPEED_10:
Auke Kok9d5c8242008-01-24 02:22:38 -08003516 adapter->tx_timeout_factor = 14;
3517 break;
3518 case SPEED_100:
Auke Kok9d5c8242008-01-24 02:22:38 -08003519 /* maybe add some timeout factor ? */
3520 break;
3521 }
3522
3523 netif_carrier_on(netdev);
Auke Kok9d5c8242008-01-24 02:22:38 -08003524
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003525 igb_ping_all_vfs(adapter);
Lior Levy17dc5662011-02-08 02:28:46 +00003526 igb_check_vf_rate_limit(adapter);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003527
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003528 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003529 if (!test_bit(__IGB_DOWN, &adapter->state))
3530 mod_timer(&adapter->phy_info_timer,
3531 round_jiffies(jiffies + 2 * HZ));
3532 }
3533 } else {
3534 if (netif_carrier_ok(netdev)) {
3535 adapter->link_speed = 0;
3536 adapter->link_duplex = 0;
Alexander Duyck527d47c2008-11-27 00:21:39 -08003537			/* Link status message must follow this format */
3538 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3539 netdev->name);
Auke Kok9d5c8242008-01-24 02:22:38 -08003540 netif_carrier_off(netdev);
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003541
Alexander Duyck4ae196d2009-02-19 20:40:07 -08003542 igb_ping_all_vfs(adapter);
3543
Alexander Duyck4b1a9872009-02-06 23:19:50 +00003544 /* link state has changed, schedule phy info update */
Auke Kok9d5c8242008-01-24 02:22:38 -08003545 if (!test_bit(__IGB_DOWN, &adapter->state))
3546 mod_timer(&adapter->phy_info_timer,
3547 round_jiffies(jiffies + 2 * HZ));
3548 }
3549 }
3550
Eric Dumazet12dcd862010-10-15 17:27:10 +00003551 spin_lock(&adapter->stats64_lock);
3552 igb_update_stats(adapter, &adapter->stats64);
3553 spin_unlock(&adapter->stats64_lock);
Auke Kok9d5c8242008-01-24 02:22:38 -08003554
Alexander Duyckdbabb062009-11-12 18:38:16 +00003555 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00003556 struct igb_ring *tx_ring = adapter->tx_ring[i];
Alexander Duyckdbabb062009-11-12 18:38:16 +00003557 if (!netif_carrier_ok(netdev)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003558 /* We've lost link, so the controller stops DMA,
3559 * but we've got queued Tx work that's never going
3560 * to get done, so reset controller to flush Tx.
3561 * (Do the reset outside of interrupt context). */
Alexander Duyckdbabb062009-11-12 18:38:16 +00003562 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3563 adapter->tx_timeout_count++;
3564 schedule_work(&adapter->reset_task);
3565 /* return immediately since reset is imminent */
3566 return;
3567 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003568 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003569
Alexander Duyckdbabb062009-11-12 18:38:16 +00003570 /* Force detection of hung controller every watchdog period */
3571 tx_ring->detect_tx_hung = true;
3572 }
Alexander Duyckf7ba2052009-10-27 23:48:51 +00003573
Auke Kok9d5c8242008-01-24 02:22:38 -08003574 /* Cause software interrupt to ensure rx ring is cleaned */
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003575 if (adapter->msix_entries) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003576 u32 eics = 0;
3577 for (i = 0; i < adapter->num_q_vectors; i++) {
3578 struct igb_q_vector *q_vector = adapter->q_vector[i];
3579 eics |= q_vector->eims_value;
3580 }
Alexander Duyck7a6ea552008-08-26 04:25:03 -07003581 wr32(E1000_EICS, eics);
3582 } else {
3583 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3584 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003585
Greg Rose13800462010-11-06 02:08:26 +00003586 igb_spoof_check(adapter);
3587
Auke Kok9d5c8242008-01-24 02:22:38 -08003588 /* Reset the timer */
3589 if (!test_bit(__IGB_DOWN, &adapter->state))
3590 mod_timer(&adapter->watchdog_timer,
3591 round_jiffies(jiffies + 2 * HZ));
3592}
3593
3594enum latency_range {
3595 lowest_latency = 0,
3596 low_latency = 1,
3597 bulk_latency = 2,
3598 latency_invalid = 255
3599};
3600
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003601/**
3602 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3603 *
3604 * Stores a new ITR value based strictly on packet size. This
3605 * algorithm is less sophisticated than that used in igb_update_itr,
3606 * due to the difficulty of synchronizing statistics across multiple
Stefan Weileef35c22010-08-06 21:11:15 +02003607 * receive rings. The divisors and thresholds used by this function
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003608 * were determined based on theoretical maximum wire speed and testing
3609 * data, in order to minimize response time while increasing bulk
3610 * throughput.
3611 * This functionality is controlled by the InterruptThrottleRate module
3612 * parameter (see igb_param.c)
3613 * NOTE: This function is called only when operating in a multiqueue
3614 * receive environment.
Alexander Duyck047e0032009-10-27 15:49:27 +00003615 * @q_vector: pointer to q_vector
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003616 **/
Alexander Duyck047e0032009-10-27 15:49:27 +00003617static void igb_update_ring_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08003618{
Alexander Duyck047e0032009-10-27 15:49:27 +00003619 int new_val = q_vector->itr_val;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003620 int avg_wire_size = 0;
Alexander Duyck047e0032009-10-27 15:49:27 +00003621 struct igb_adapter *adapter = q_vector->adapter;
Eric Dumazet12dcd862010-10-15 17:27:10 +00003622 struct igb_ring *ring;
3623 unsigned int packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08003624
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003625 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3626 * ints/sec - ITR timer value of 120 ticks.
3627 */
3628 if (adapter->link_speed != SPEED_1000) {
Alexander Duyck047e0032009-10-27 15:49:27 +00003629 new_val = 976;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003630 goto set_itr_val;
3631 }
Alexander Duyck047e0032009-10-27 15:49:27 +00003632
Eric Dumazet12dcd862010-10-15 17:27:10 +00003633 ring = q_vector->rx_ring;
3634 if (ring) {
3635 packets = ACCESS_ONCE(ring->total_packets);
3636
3637 if (packets)
3638 avg_wire_size = ring->total_bytes / packets;
Alexander Duyck047e0032009-10-27 15:49:27 +00003639 }
3640
Eric Dumazet12dcd862010-10-15 17:27:10 +00003641 ring = q_vector->tx_ring;
3642 if (ring) {
3643 packets = ACCESS_ONCE(ring->total_packets);
3644
3645 if (packets)
3646 avg_wire_size = max_t(u32, avg_wire_size,
3647 ring->total_bytes / packets);
Alexander Duyck047e0032009-10-27 15:49:27 +00003648 }
3649
3650 /* if avg_wire_size isn't set no work was done */
3651 if (!avg_wire_size)
3652 goto clear_counts;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003653
3654 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3655 avg_wire_size += 24;
3656
3657 /* Don't starve jumbo frames */
3658 avg_wire_size = min(avg_wire_size, 3000);
3659
3660 /* Give a little boost to mid-size frames */
3661 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3662 new_val = avg_wire_size / 3;
3663 else
3664 new_val = avg_wire_size / 2;
3665
Nick Nunleyabe1c362010-02-17 01:03:19 +00003666 /* when in itr mode 3 do not exceed 20K ints/sec */
3667 if (adapter->rx_itr_setting == 3 && new_val < 196)
3668 new_val = 196;
3669
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003670set_itr_val:
Alexander Duyck047e0032009-10-27 15:49:27 +00003671 if (new_val != q_vector->itr_val) {
3672 q_vector->itr_val = new_val;
3673 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003674 }
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003675clear_counts:
Alexander Duyck047e0032009-10-27 15:49:27 +00003676 if (q_vector->rx_ring) {
3677 q_vector->rx_ring->total_bytes = 0;
3678 q_vector->rx_ring->total_packets = 0;
3679 }
3680 if (q_vector->tx_ring) {
3681 q_vector->tx_ring->total_bytes = 0;
3682 q_vector->tx_ring->total_packets = 0;
3683 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003684}
3685
3686/**
3687 * igb_update_itr - update the dynamic ITR value based on statistics
3688 * Stores a new ITR value based on packet and byte
3689 * counts during the last interrupt. The advantage of per interrupt
3690 * computation is faster updates and more accurate ITR for the current
3691 * traffic pattern. Constants in this function were computed
3692 * based on theoretical maximum wire speed and thresholds were set based
3693 * on testing data as well as attempting to minimize response time
3694 * while increasing bulk throughput.
3695 * this functionality is controlled by the InterruptThrottleRate module
3696 * parameter (see igb_param.c)
3697 * NOTE: These calculations are only valid when operating in a single-
3698 * queue environment.
3699 * @adapter: pointer to adapter
Alexander Duyck047e0032009-10-27 15:49:27 +00003700 * @itr_setting: current q_vector->itr_val
Auke Kok9d5c8242008-01-24 02:22:38 -08003701 * @packets: the number of packets during this measurement interval
3702 * @bytes: the number of bytes during this measurement interval
3703 **/
3704static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3705 int packets, int bytes)
3706{
3707 unsigned int retval = itr_setting;
3708
3709 if (packets == 0)
3710 goto update_itr_done;
3711
3712 switch (itr_setting) {
3713 case lowest_latency:
3714 /* handle TSO and jumbo frames */
3715 if (bytes/packets > 8000)
3716 retval = bulk_latency;
3717 else if ((packets < 5) && (bytes > 512))
3718 retval = low_latency;
3719 break;
3720 case low_latency: /* 50 usec aka 20000 ints/s */
3721 if (bytes > 10000) {
3722 /* this if handles the TSO accounting */
3723 if (bytes/packets > 8000) {
3724 retval = bulk_latency;
3725 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3726 retval = bulk_latency;
3727 } else if ((packets > 35)) {
3728 retval = lowest_latency;
3729 }
3730 } else if (bytes/packets > 2000) {
3731 retval = bulk_latency;
3732 } else if (packets <= 2 && bytes < 512) {
3733 retval = lowest_latency;
3734 }
3735 break;
3736 case bulk_latency: /* 250 usec aka 4000 ints/s */
3737 if (bytes > 25000) {
3738 if (packets > 35)
3739 retval = low_latency;
Alexander Duyck1e5c3d22009-02-12 18:17:21 +00003740 } else if (bytes < 1500) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003741 retval = low_latency;
3742 }
3743 break;
3744 }
3745
3746update_itr_done:
3747 return retval;
3748}
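/*
 * Example transition through the table above: from low_latency, an
 * interval of 4 packets and 12000 bytes takes the bytes > 10000
 * branch; 3000 bytes/packet is under the TSO cutoff but packets < 10,
 * so the ring is demoted to bulk_latency for the next interval.
 */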
3749
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003750static void igb_set_itr(struct igb_adapter *adapter)
Auke Kok9d5c8242008-01-24 02:22:38 -08003751{
Alexander Duyck047e0032009-10-27 15:49:27 +00003752 struct igb_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9d5c8242008-01-24 02:22:38 -08003753 u16 current_itr;
Alexander Duyck047e0032009-10-27 15:49:27 +00003754 u32 new_itr = q_vector->itr_val;
Auke Kok9d5c8242008-01-24 02:22:38 -08003755
3756 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3757 if (adapter->link_speed != SPEED_1000) {
3758 current_itr = 0;
3759 new_itr = 4000;
3760 goto set_itr_now;
3761 }
3762
3763 adapter->rx_itr = igb_update_itr(adapter,
3764 adapter->rx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003765 q_vector->rx_ring->total_packets,
3766 q_vector->rx_ring->total_bytes);
Auke Kok9d5c8242008-01-24 02:22:38 -08003767
Alexander Duyck047e0032009-10-27 15:49:27 +00003768 adapter->tx_itr = igb_update_itr(adapter,
3769 adapter->tx_itr,
Alexander Duyck3025a442010-02-17 01:02:39 +00003770 q_vector->tx_ring->total_packets,
3771 q_vector->tx_ring->total_bytes);
Alexander Duyck047e0032009-10-27 15:49:27 +00003772 current_itr = max(adapter->rx_itr, adapter->tx_itr);
Auke Kok9d5c8242008-01-24 02:22:38 -08003773
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003774 /* conservative mode (itr 3) eliminates the lowest_latency setting */
Alexander Duyck4fc82ad2009-10-27 23:45:42 +00003775 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003776 current_itr = low_latency;
3777
Auke Kok9d5c8242008-01-24 02:22:38 -08003778 switch (current_itr) {
3779 /* counts and packets in update_itr are dependent on these numbers */
3780 case lowest_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003781 new_itr = 56; /* aka 70,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003782 break;
3783 case low_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003784 new_itr = 196; /* aka 20,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003785 break;
3786 case bulk_latency:
Alexander Duyck78b1f6072009-04-23 11:20:29 +00003787 new_itr = 980; /* aka 4,000 ints/sec */
Auke Kok9d5c8242008-01-24 02:22:38 -08003788 break;
3789 default:
3790 break;
3791 }
3792
3793set_itr_now:
Alexander Duyck3025a442010-02-17 01:02:39 +00003794 q_vector->rx_ring->total_bytes = 0;
3795 q_vector->rx_ring->total_packets = 0;
3796 q_vector->tx_ring->total_bytes = 0;
3797 q_vector->tx_ring->total_packets = 0;
Alexander Duyck6eb5a7f2008-07-08 15:14:44 -07003798
Alexander Duyck047e0032009-10-27 15:49:27 +00003799 if (new_itr != q_vector->itr_val) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003800 /* this attempts to bias the interrupt rate towards Bulk
3801 * by adding intermediate steps when interrupt rate is
3802 * increasing */
Alexander Duyck047e0032009-10-27 15:49:27 +00003803 new_itr = new_itr > q_vector->itr_val ?
3804 max((new_itr * q_vector->itr_val) /
3805 (new_itr + (q_vector->itr_val >> 2)),
3806 new_itr) :
Auke Kok9d5c8242008-01-24 02:22:38 -08003807 new_itr;
3808 /* Don't write the value here; it resets the adapter's
3809 * internal timer, and causes us to delay far longer than
3810 * we should between interrupts. Instead, we write the ITR
3811 * value at the beginning of the next interrupt so the timing
3812 * ends up being correct.
3813 */
Alexander Duyck047e0032009-10-27 15:49:27 +00003814 q_vector->itr_val = new_itr;
3815 q_vector->set_itr = 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08003816 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003817}
3818
Auke Kok9d5c8242008-01-24 02:22:38 -08003819#define IGB_TX_FLAGS_CSUM 0x00000001
3820#define IGB_TX_FLAGS_VLAN 0x00000002
3821#define IGB_TX_FLAGS_TSO 0x00000004
3822#define IGB_TX_FLAGS_IPV4 0x00000008
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003823#define IGB_TX_FLAGS_TSTAMP 0x00000010
3824#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3825#define IGB_TX_FLAGS_VLAN_SHIFT 16
Auke Kok9d5c8242008-01-24 02:22:38 -08003826
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003827static inline int igb_tso_adv(struct igb_ring *tx_ring,
Auke Kok9d5c8242008-01-24 02:22:38 -08003828 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3829{
3830 struct e1000_adv_tx_context_desc *context_desc;
3831 unsigned int i;
3832 int err;
3833 struct igb_buffer *buffer_info;
3834 u32 info = 0, tu_cmd = 0;
Nick Nunley91d4ee32010-02-17 01:04:56 +00003835 u32 mss_l4len_idx;
3836 u8 l4len;
Auke Kok9d5c8242008-01-24 02:22:38 -08003837
3838 if (skb_header_cloned(skb)) {
3839 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3840 if (err)
3841 return err;
3842 }
3843
3844 l4len = tcp_hdrlen(skb);
3845 *hdr_len += l4len;
3846
3847 if (skb->protocol == htons(ETH_P_IP)) {
3848 struct iphdr *iph = ip_hdr(skb);
3849 iph->tot_len = 0;
3850 iph->check = 0;
3851 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3852 iph->daddr, 0,
3853 IPPROTO_TCP,
3854 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08003855 } else if (skb_is_gso_v6(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08003856 ipv6_hdr(skb)->payload_len = 0;
3857 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3858 &ipv6_hdr(skb)->daddr,
3859 0, IPPROTO_TCP, 0);
3860 }
3861
3862 i = tx_ring->next_to_use;
3863
3864 buffer_info = &tx_ring->buffer_info[i];
3865 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3866 /* VLAN MACLEN IPLEN */
3867 if (tx_flags & IGB_TX_FLAGS_VLAN)
3868 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3869 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3870 *hdr_len += skb_network_offset(skb);
3871 info |= skb_network_header_len(skb);
3872 *hdr_len += skb_network_header_len(skb);
3873 context_desc->vlan_macip_lens = cpu_to_le32(info);
3874
3875 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3876 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3877
3878 if (skb->protocol == htons(ETH_P_IP))
3879 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3880 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3881
3882 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3883
3884 /* MSS L4LEN IDX */
3885 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3886 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3887
Alexander Duyck73cd78f2009-02-12 18:16:59 +00003888 /* For 82575, context index must be unique per ring. */
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003889 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3890 mss_l4len_idx |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08003891
3892 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3893 context_desc->seqnum_seed = 0;
3894
3895 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003896 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003897 buffer_info->dma = 0;
3898 i++;
3899 if (i == tx_ring->count)
3900 i = 0;
3901
3902 tx_ring->next_to_use = i;
3903
3904 return true;
3905}
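/*
 * Example of the MSS/L4LEN packing above, assuming the usual shift
 * constants (MSS in bits 31:16, L4 length in bits 15:8): an MSS of
 * 1448 with a 20-byte TCP header encodes as (1448 << 16) | (20 << 8)
 * = 0x05a81400, before any 82575 per-ring context index is OR'd in
 * at bit 4.
 */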
3906
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003907static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3908 struct sk_buff *skb, u32 tx_flags)
Auke Kok9d5c8242008-01-24 02:22:38 -08003909{
3910 struct e1000_adv_tx_context_desc *context_desc;
Alexander Duyck59d71982010-04-27 13:09:25 +00003911 struct device *dev = tx_ring->dev;
Auke Kok9d5c8242008-01-24 02:22:38 -08003912 struct igb_buffer *buffer_info;
3913 u32 info = 0, tu_cmd = 0;
Alexander Duyck80785292009-10-27 15:51:47 +00003914 unsigned int i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003915
3916 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3917 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3918 i = tx_ring->next_to_use;
3919 buffer_info = &tx_ring->buffer_info[i];
3920 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3921
3922 if (tx_flags & IGB_TX_FLAGS_VLAN)
3923 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00003924
Auke Kok9d5c8242008-01-24 02:22:38 -08003925 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3926 if (skb->ip_summed == CHECKSUM_PARTIAL)
3927 info |= skb_network_header_len(skb);
3928
3929 context_desc->vlan_macip_lens = cpu_to_le32(info);
3930
3931 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3932
3933 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Arthur Jonesfa4a7ef2009-03-21 16:55:07 -07003934 __be16 protocol;
3935
3936 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3937 const struct vlan_ethhdr *vhdr =
3938				(const struct vlan_ethhdr *)skb->data;
3939
3940 protocol = vhdr->h_vlan_encapsulated_proto;
3941 } else {
3942 protocol = skb->protocol;
3943 }
3944
3945 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08003946 case cpu_to_be16(ETH_P_IP):
Auke Kok9d5c8242008-01-24 02:22:38 -08003947 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003948 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3949 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003950 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3951 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003952 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08003953 case cpu_to_be16(ETH_P_IPV6):
Mitch Williams44b0cda2008-03-07 10:32:13 -08003954 /* XXX what about other V6 headers?? */
3955 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3956 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburgb9473562009-04-27 22:36:13 +00003957 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3958 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
Mitch Williams44b0cda2008-03-07 10:32:13 -08003959 break;
3960 default:
3961 if (unlikely(net_ratelimit()))
Alexander Duyck59d71982010-04-27 13:09:25 +00003962 dev_warn(dev,
Mitch Williams44b0cda2008-03-07 10:32:13 -08003963 "partial checksum but proto=%x!\n",
3964 skb->protocol);
3965 break;
3966 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003967 }
3968
3969 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3970 context_desc->seqnum_seed = 0;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003971 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07003972 context_desc->mss_l4len_idx =
Alexander Duyck85ad76b2009-10-27 15:52:46 +00003973 cpu_to_le32(tx_ring->reg_idx << 4);
Auke Kok9d5c8242008-01-24 02:22:38 -08003974
3975 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003976 buffer_info->next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08003977 buffer_info->dma = 0;
3978
3979 i++;
3980 if (i == tx_ring->count)
3981 i = 0;
3982 tx_ring->next_to_use = i;
3983
3984 return true;
3985 }
Auke Kok9d5c8242008-01-24 02:22:38 -08003986 return false;
3987}
3988
3989#define IGB_MAX_TXD_PWR 16
3990#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3991
Alexander Duyck80785292009-10-27 15:51:47 +00003992static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck0e014cb2008-12-26 01:33:18 -08003993 unsigned int first)
Auke Kok9d5c8242008-01-24 02:22:38 -08003994{
3995 struct igb_buffer *buffer_info;
Alexander Duyck59d71982010-04-27 13:09:25 +00003996 struct device *dev = tx_ring->dev;
Nick Nunley28739572010-05-04 21:58:07 +00003997 unsigned int hlen = skb_headlen(skb);
Auke Kok9d5c8242008-01-24 02:22:38 -08003998 unsigned int count = 0, i;
3999 unsigned int f;
Nick Nunley28739572010-05-04 21:58:07 +00004000 u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
Auke Kok9d5c8242008-01-24 02:22:38 -08004001
4002 i = tx_ring->next_to_use;
4003
4004 buffer_info = &tx_ring->buffer_info[i];
Nick Nunley28739572010-05-04 21:58:07 +00004005 BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
4006 buffer_info->length = hlen;
Auke Kok9d5c8242008-01-24 02:22:38 -08004007 /* set time_stamp *before* dma to help avoid a possible race */
4008 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004009 buffer_info->next_to_watch = i;
Nick Nunley28739572010-05-04 21:58:07 +00004010 buffer_info->dma = dma_map_single(dev, skb->data, hlen,
Alexander Duyck59d71982010-04-27 13:09:25 +00004011 DMA_TO_DEVICE);
4012 if (dma_mapping_error(dev, buffer_info->dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004013 goto dma_error;
Auke Kok9d5c8242008-01-24 02:22:38 -08004014
4015 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
Nick Nunley28739572010-05-04 21:58:07 +00004016 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
4017 unsigned int len = frag->size;
Auke Kok9d5c8242008-01-24 02:22:38 -08004018
Alexander Duyck85811452010-01-23 01:35:00 -08004019 count++;
Alexander Duyck65689fe2009-03-20 00:17:43 +00004020 i++;
4021 if (i == tx_ring->count)
4022 i = 0;
4023
Auke Kok9d5c8242008-01-24 02:22:38 -08004024 buffer_info = &tx_ring->buffer_info[i];
4025 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
4026 buffer_info->length = len;
4027 buffer_info->time_stamp = jiffies;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004028 buffer_info->next_to_watch = i;
Alexander Duyck6366ad32009-12-02 16:47:18 +00004029 buffer_info->mapped_as_page = true;
Alexander Duyck59d71982010-04-27 13:09:25 +00004030 buffer_info->dma = dma_map_page(dev,
Alexander Duyck6366ad32009-12-02 16:47:18 +00004031 frag->page,
4032 frag->page_offset,
4033 len,
Alexander Duyck59d71982010-04-27 13:09:25 +00004034 DMA_TO_DEVICE);
4035 if (dma_mapping_error(dev, buffer_info->dma))
Alexander Duyck6366ad32009-12-02 16:47:18 +00004036 goto dma_error;
4037
Auke Kok9d5c8242008-01-24 02:22:38 -08004038 }
4039
Auke Kok9d5c8242008-01-24 02:22:38 -08004040 tx_ring->buffer_info[i].skb = skb;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004041 tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
Nick Nunley28739572010-05-04 21:58:07 +00004042	/* account for the header being replicated in each gso segment */
4043 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
4044 tx_ring->buffer_info[i].gso_segs = gso_segs;
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004045 tx_ring->buffer_info[first].next_to_watch = i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004046
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004047 return ++count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00004048
4049dma_error:
Alexander Duyck59d71982010-04-27 13:09:25 +00004050 dev_err(dev, "TX DMA map failed\n");
Alexander Duyck6366ad32009-12-02 16:47:18 +00004051
4052 /* clear timestamp and dma mappings for failed buffer_info mapping */
4053 buffer_info->dma = 0;
4054 buffer_info->time_stamp = 0;
4055 buffer_info->length = 0;
4056 buffer_info->next_to_watch = 0;
4057 buffer_info->mapped_as_page = false;
Alexander Duyck6366ad32009-12-02 16:47:18 +00004058
4059 /* clear timestamp and dma mappings for remaining portion of packet */
Nick Nunleya77ff702010-02-17 01:06:16 +00004060 while (count--) {
4061 if (i == 0)
4062 i = tx_ring->count;
Alexander Duyck6366ad32009-12-02 16:47:18 +00004063 i--;
Alexander Duyck6366ad32009-12-02 16:47:18 +00004064 buffer_info = &tx_ring->buffer_info[i];
4065 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4066 }
4067
4068 return 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004069}
4070
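/**
 * igb_tx_queue_adv - write mapped descriptors to the ring and bump the tail
 * @tx_ring: ring holding the mapped buffers
 * @tx_flags: IGB_TX_FLAGS_* bits for this skb
 * @count: number of descriptors to write
 * @paylen: total packet length
 * @hdr_len: header length, subtracted from @paylen for TSO accounting
 **/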
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004071static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
Nick Nunley91d4ee32010-02-17 01:04:56 +00004072 u32 tx_flags, int count, u32 paylen,
Auke Kok9d5c8242008-01-24 02:22:38 -08004073 u8 hdr_len)
4074{
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004075 union e1000_adv_tx_desc *tx_desc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004076 struct igb_buffer *buffer_info;
4077 u32 olinfo_status = 0, cmd_type_len;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004078 unsigned int i = tx_ring->next_to_use;
Auke Kok9d5c8242008-01-24 02:22:38 -08004079
4080 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
4081 E1000_ADVTXD_DCMD_DEXT);
4082
4083 if (tx_flags & IGB_TX_FLAGS_VLAN)
4084 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
4085
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004086 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4087 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
4088
Auke Kok9d5c8242008-01-24 02:22:38 -08004089 if (tx_flags & IGB_TX_FLAGS_TSO) {
4090 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
4091
4092 /* insert tcp checksum */
4093 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4094
4095 /* insert ip checksum */
4096 if (tx_flags & IGB_TX_FLAGS_IPV4)
4097 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4098
4099 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
4100 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4101 }
4102
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004103 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
4104 (tx_flags & (IGB_TX_FLAGS_CSUM |
4105 IGB_TX_FLAGS_TSO |
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004106 IGB_TX_FLAGS_VLAN)))
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004107 olinfo_status |= tx_ring->reg_idx << 4;
Auke Kok9d5c8242008-01-24 02:22:38 -08004108
4109 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
4110
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004111 do {
Auke Kok9d5c8242008-01-24 02:22:38 -08004112 buffer_info = &tx_ring->buffer_info[i];
4113 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
4114 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
4115 tx_desc->read.cmd_type_len =
4116 cpu_to_le32(cmd_type_len | buffer_info->length);
4117 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004118 count--;
Auke Kok9d5c8242008-01-24 02:22:38 -08004119 i++;
4120 if (i == tx_ring->count)
4121 i = 0;
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004122 } while (count > 0);
Auke Kok9d5c8242008-01-24 02:22:38 -08004123
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004124 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
Auke Kok9d5c8242008-01-24 02:22:38 -08004125 /* Force memory writes to complete before letting h/w
4126 * know there are new descriptors to fetch. (Only
4127 * applicable for weak-ordered memory model archs,
4128 * such as IA-64). */
4129 wmb();
4130
4131 tx_ring->next_to_use = i;
Alexander Duyckfce99e32009-10-27 15:51:27 +00004132 writel(i, tx_ring->tail);
Auke Kok9d5c8242008-01-24 02:22:38 -08004133	/* we need this if more than one processor can write to our tail
4134	 * at a time; it synchronizes IO on IA64/Altix systems */
4135 mmiowb();
4136}
4137
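/**
 * __igb_maybe_stop_tx - stop the subqueue, then re-check for free descriptors
 * @tx_ring: ring to check
 * @size: number of descriptors needed
 *
 * Returns -EBUSY if the queue stays stopped, 0 if it was woken again.
 **/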
Alexander Duycke694e962009-10-27 15:53:06 +00004138static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004139{
Alexander Duycke694e962009-10-27 15:53:06 +00004140 struct net_device *netdev = tx_ring->netdev;
4141
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004142 netif_stop_subqueue(netdev, tx_ring->queue_index);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004143
Auke Kok9d5c8242008-01-24 02:22:38 -08004144 /* Herbert's original patch had:
4145 * smp_mb__after_netif_stop_queue();
4146 * but since that doesn't exist yet, just open code it. */
4147 smp_mb();
4148
4149	/* We need to check again in case another CPU has just
4150 * made room available. */
Alexander Duyckc493ea42009-03-20 00:16:50 +00004151 if (igb_desc_unused(tx_ring) < size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004152 return -EBUSY;
4153
4154 /* A reprieve! */
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004155 netif_wake_subqueue(netdev, tx_ring->queue_index);
Eric Dumazet12dcd862010-10-15 17:27:10 +00004156
4157 u64_stats_update_begin(&tx_ring->tx_syncp2);
4158 tx_ring->tx_stats.restart_queue2++;
4159 u64_stats_update_end(&tx_ring->tx_syncp2);
4160
Auke Kok9d5c8242008-01-24 02:22:38 -08004161 return 0;
4162}
4163
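/**
 * igb_maybe_stop_tx - stop the queue if fewer than @size descriptors are free
 * @tx_ring: ring to check
 * @size: number of descriptors needed
 **/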
Nick Nunley717ba0892010-02-17 01:04:18 +00004164static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004165{
Alexander Duyckc493ea42009-03-20 00:16:50 +00004166 if (igb_desc_unused(tx_ring) >= size)
Auke Kok9d5c8242008-01-24 02:22:38 -08004167 return 0;
Alexander Duycke694e962009-10-27 15:53:06 +00004168 return __igb_maybe_stop_tx(tx_ring, size);
Auke Kok9d5c8242008-01-24 02:22:38 -08004169}
4170
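/**
 * igb_xmit_frame_ring_adv - prepare and send one skb on a specific ring
 * @skb: buffer to transmit
 * @tx_ring: ring to place the buffer on
 *
 * Reserves descriptors, applies timestamp/VLAN/TSO/checksum offload
 * flags, maps the buffer and hands it to the hardware.
 **/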
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004171netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4172 struct igb_ring *tx_ring)
Auke Kok9d5c8242008-01-24 02:22:38 -08004173{
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004174 int tso = 0, count;
Nick Nunley91d4ee32010-02-17 01:04:56 +00004175 u32 tx_flags = 0;
4176 u16 first;
4177 u8 hdr_len = 0;
Auke Kok9d5c8242008-01-24 02:22:38 -08004178
Auke Kok9d5c8242008-01-24 02:22:38 -08004179 /* need: 1 descriptor per page,
4180 * + 2 desc gap to keep tail from touching head,
4181 * + 1 desc for skb->data,
4182 * + 1 desc for context descriptor,
4183 * otherwise try next time */
Alexander Duycke694e962009-10-27 15:53:06 +00004184 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004185 /* this is a hard error */
Auke Kok9d5c8242008-01-24 02:22:38 -08004186 return NETDEV_TX_BUSY;
4187 }
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004188
Oliver Hartkopp2244d072010-08-17 08:59:14 +00004189 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4190 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004191 tx_flags |= IGB_TX_FLAGS_TSTAMP;
Patrick Ohly33af6bc2009-02-12 05:03:43 +00004192 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004193
Jesse Grosseab6d182010-10-20 13:56:03 +00004194 if (vlan_tx_tag_present(skb)) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004195 tx_flags |= IGB_TX_FLAGS_VLAN;
4196 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4197 }
4198
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004199 if (skb->protocol == htons(ETH_P_IP))
4200 tx_flags |= IGB_TX_FLAGS_IPV4;
4201
Alexander Duyck0e014cb2008-12-26 01:33:18 -08004202 first = tx_ring->next_to_use;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004203 if (skb_is_gso(skb)) {
4204 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004205
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004206 if (tso < 0) {
4207 dev_kfree_skb_any(skb);
4208 return NETDEV_TX_OK;
4209 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004210 }
4211
4212 if (tso)
4213 tx_flags |= IGB_TX_FLAGS_TSO;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004214 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
Alexander Duyckbc1cbd32009-02-13 14:45:17 +00004215 (skb->ip_summed == CHECKSUM_PARTIAL))
4216 tx_flags |= IGB_TX_FLAGS_CSUM;
Auke Kok9d5c8242008-01-24 02:22:38 -08004217
Alexander Duyck65689fe2009-03-20 00:17:43 +00004218 /*
Alexander Duyckcdfd01fc2009-10-27 23:50:57 +00004219	 * count reflects descriptors mapped; if 0 then a mapping error
Alexander Duyck65689fe2009-03-20 00:17:43 +00004220	 * has occurred and we need to rewind the descriptor queue
4221 */
Alexander Duyck80785292009-10-27 15:51:47 +00004222 count = igb_tx_map_adv(tx_ring, skb, first);
Alexander Duyck6366ad32009-12-02 16:47:18 +00004223 if (!count) {
Alexander Duyck65689fe2009-03-20 00:17:43 +00004224 dev_kfree_skb_any(skb);
4225 tx_ring->buffer_info[first].time_stamp = 0;
4226 tx_ring->next_to_use = first;
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004227 return NETDEV_TX_OK;
Alexander Duyck65689fe2009-03-20 00:17:43 +00004228 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004229
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004230 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
4231
4232 /* Make sure there is space in the ring for the next send. */
Alexander Duycke694e962009-10-27 15:53:06 +00004233 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
Alexander Duyck85ad76b2009-10-27 15:52:46 +00004234
Auke Kok9d5c8242008-01-24 02:22:38 -08004235 return NETDEV_TX_OK;
4236}
4237
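/**
 * igb_xmit_frame_adv - net_device transmit entry point
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Selects a tx ring from the skb queue mapping and hands the
 * buffer to igb_xmit_frame_ring_adv().
 **/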
Stephen Hemminger3b29a562009-08-31 19:50:55 +00004238static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
4239 struct net_device *netdev)
Auke Kok9d5c8242008-01-24 02:22:38 -08004240{
4241 struct igb_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004242 struct igb_ring *tx_ring;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004243 int r_idx = 0;
Alexander Duyckb1a436c2009-10-27 15:54:43 +00004244
4245 if (test_bit(__IGB_DOWN, &adapter->state)) {
4246 dev_kfree_skb_any(skb);
4247 return NETDEV_TX_OK;
4248 }
4249
4250 if (skb->len <= 0) {
4251 dev_kfree_skb_any(skb);
4252 return NETDEV_TX_OK;
4253 }
4254
Alexander Duyck1bfaf072009-02-19 20:39:23 -08004255 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004256 tx_ring = adapter->multi_tx_table[r_idx];
Auke Kok9d5c8242008-01-24 02:22:38 -08004257
4258 /* This goes back to the question of how to logically map a tx queue
4259 * to a flow. Right now, performance is impacted slightly negatively
4260 * if using multiple tx queues. If the stack breaks away from a
4261 * single qdisc implementation, we can look at this again. */
Alexander Duycke694e962009-10-27 15:53:06 +00004262 return igb_xmit_frame_ring_adv(skb, tx_ring);
Auke Kok9d5c8242008-01-24 02:22:38 -08004263}
4264
4265/**
4266 * igb_tx_timeout - Respond to a Tx Hang
4267 * @netdev: network interface device structure
4268 **/
4269static void igb_tx_timeout(struct net_device *netdev)
4270{
4271 struct igb_adapter *adapter = netdev_priv(netdev);
4272 struct e1000_hw *hw = &adapter->hw;
4273
4274 /* Do the reset outside of interrupt context */
4275 adapter->tx_timeout_count++;
Alexander Duyckf7ba2052009-10-27 23:48:51 +00004276
Alexander Duyck55cac242009-11-19 12:42:21 +00004277 if (hw->mac.type == e1000_82580)
4278 hw->dev_spec._82575.global_device_reset = true;
4279
Auke Kok9d5c8242008-01-24 02:22:38 -08004280 schedule_work(&adapter->reset_task);
Alexander Duyck265de402009-02-06 23:22:52 +00004281 wr32(E1000_EICS,
4282 (adapter->eims_enable_mask & ~adapter->eims_other));
Auke Kok9d5c8242008-01-24 02:22:38 -08004283}
4284
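/**
 * igb_reset_task - work handler that dumps adapter state and reinitializes it
 * @work: work_struct embedded in the adapter structure
 **/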
4285static void igb_reset_task(struct work_struct *work)
4286{
4287 struct igb_adapter *adapter;
4288 adapter = container_of(work, struct igb_adapter, reset_task);
4289
Taku Izumic97ec422010-04-27 14:39:30 +00004290 igb_dump(adapter);
4291 netdev_err(adapter->netdev, "Reset adapter\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004292 igb_reinit_locked(adapter);
4293}
4294
4295/**
Eric Dumazet12dcd862010-10-15 17:27:10 +00004296 * igb_get_stats64 - Get System Network Statistics
Auke Kok9d5c8242008-01-24 02:22:38 -08004297 * @netdev: network interface device structure
Eric Dumazet12dcd862010-10-15 17:27:10 +00004298 * @stats: rtnl_link_stats64 pointer
Auke Kok9d5c8242008-01-24 02:22:38 -08004299 *
Auke Kok9d5c8242008-01-24 02:22:38 -08004300 **/
Eric Dumazet12dcd862010-10-15 17:27:10 +00004301static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4302 struct rtnl_link_stats64 *stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004303{
Eric Dumazet12dcd862010-10-15 17:27:10 +00004304 struct igb_adapter *adapter = netdev_priv(netdev);
4305
4306 spin_lock(&adapter->stats64_lock);
4307 igb_update_stats(adapter, &adapter->stats64);
4308 memcpy(stats, &adapter->stats64, sizeof(*stats));
4309 spin_unlock(&adapter->stats64_lock);
4310
4311 return stats;
Auke Kok9d5c8242008-01-24 02:22:38 -08004312}
4313
4314/**
4315 * igb_change_mtu - Change the Maximum Transfer Unit
4316 * @netdev: network interface device structure
4317 * @new_mtu: new value for maximum frame size
4318 *
4319 * Returns 0 on success, negative on failure
4320 **/
4321static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4322{
4323 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004324 struct pci_dev *pdev = adapter->pdev;
Auke Kok9d5c8242008-01-24 02:22:38 -08004325 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck4c844852009-10-27 15:52:07 +00004326 u32 rx_buffer_len, i;
Auke Kok9d5c8242008-01-24 02:22:38 -08004327
Alexander Duyckc809d222009-10-27 23:52:13 +00004328 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004329 dev_err(&pdev->dev, "Invalid MTU setting\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004330 return -EINVAL;
4331 }
4332
Auke Kok9d5c8242008-01-24 02:22:38 -08004333 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
Alexander Duyck090b1792009-10-27 23:51:55 +00004334 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
Auke Kok9d5c8242008-01-24 02:22:38 -08004335 return -EINVAL;
4336 }
4337
4338 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4339 msleep(1);
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004340
Auke Kok9d5c8242008-01-24 02:22:38 -08004341 /* igb_down has a dependency on max_frame_size */
4342 adapter->max_frame_size = max_frame;
Alexander Duyck559e9c42009-10-27 23:52:50 +00004343
Auke Kok9d5c8242008-01-24 02:22:38 -08004344 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4345	 * means we reserve 2 more; this pushes us to allocate from the next
4346 * larger slab size.
4347 * i.e. RXBUFFER_2048 --> size-4096 slab
4348 */
4349
Nick Nunley757b77e2010-03-26 11:36:47 +00004350 if (adapter->hw.mac.type == e1000_82580)
4351 max_frame += IGB_TS_HDR_LEN;
4352
Alexander Duyck7d95b712009-10-27 15:50:08 +00004353 if (max_frame <= IGB_RXBUFFER_1024)
Alexander Duyck4c844852009-10-27 15:52:07 +00004354 rx_buffer_len = IGB_RXBUFFER_1024;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00004355 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
Alexander Duyck4c844852009-10-27 15:52:07 +00004356 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Alexander Duyck6ec43fe2009-10-27 15:50:48 +00004357 else
Alexander Duyck4c844852009-10-27 15:52:07 +00004358 rx_buffer_len = IGB_RXBUFFER_128;
4359
Nick Nunley757b77e2010-03-26 11:36:47 +00004360 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
4361 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
4362 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
4363
4364 if ((adapter->hw.mac.type == e1000_82580) &&
4365 (rx_buffer_len == IGB_RXBUFFER_128))
4366 rx_buffer_len += IGB_RXBUFFER_64;
4367
Alexander Duyck4c844852009-10-27 15:52:07 +00004368 if (netif_running(netdev))
4369 igb_down(adapter);
Auke Kok9d5c8242008-01-24 02:22:38 -08004370
Alexander Duyck090b1792009-10-27 23:51:55 +00004371 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
Auke Kok9d5c8242008-01-24 02:22:38 -08004372 netdev->mtu, new_mtu);
4373 netdev->mtu = new_mtu;
4374
Alexander Duyck4c844852009-10-27 15:52:07 +00004375 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyck3025a442010-02-17 01:02:39 +00004376 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
Alexander Duyck4c844852009-10-27 15:52:07 +00004377
Auke Kok9d5c8242008-01-24 02:22:38 -08004378 if (netif_running(netdev))
4379 igb_up(adapter);
4380 else
4381 igb_reset(adapter);
4382
4383 clear_bit(__IGB_RESETTING, &adapter->state);
4384
4385 return 0;
4386}
4387
4388/**
4389 * igb_update_stats - Update the board statistics counters
4390 * @adapter: board private structure
4391 **/
4392
Eric Dumazet12dcd862010-10-15 17:27:10 +00004393void igb_update_stats(struct igb_adapter *adapter,
4394 struct rtnl_link_stats64 *net_stats)
Auke Kok9d5c8242008-01-24 02:22:38 -08004395{
4396 struct e1000_hw *hw = &adapter->hw;
4397 struct pci_dev *pdev = adapter->pdev;
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004398 u32 reg, mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004399 u16 phy_tmp;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004400 int i;
4401 u64 bytes, packets;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004402 unsigned int start;
4403 u64 _bytes, _packets;
Auke Kok9d5c8242008-01-24 02:22:38 -08004404
4405#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4406
4407 /*
4408 * Prevent stats update while adapter is being reset, or if the pci
4409 * connection is down.
4410 */
4411 if (adapter->link_speed == 0)
4412 return;
4413 if (pci_channel_offline(pdev))
4414 return;
4415
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004416 bytes = 0;
4417 packets = 0;
4418 for (i = 0; i < adapter->num_rx_queues; i++) {
4419 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
Alexander Duyck3025a442010-02-17 01:02:39 +00004420 struct igb_ring *ring = adapter->rx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004421
Alexander Duyck3025a442010-02-17 01:02:39 +00004422 ring->rx_stats.drops += rqdpc_tmp;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004423 net_stats->rx_fifo_errors += rqdpc_tmp;
Eric Dumazet12dcd862010-10-15 17:27:10 +00004424
4425 do {
4426 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4427 _bytes = ring->rx_stats.bytes;
4428 _packets = ring->rx_stats.packets;
4429 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4430 bytes += _bytes;
4431 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004432 }
4433
Alexander Duyck128e45e2009-11-12 18:37:38 +00004434 net_stats->rx_bytes = bytes;
4435 net_stats->rx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004436
4437 bytes = 0;
4438 packets = 0;
4439 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyck3025a442010-02-17 01:02:39 +00004440 struct igb_ring *ring = adapter->tx_ring[i];
Eric Dumazet12dcd862010-10-15 17:27:10 +00004441 do {
4442 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4443 _bytes = ring->tx_stats.bytes;
4444 _packets = ring->tx_stats.packets;
4445 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4446 bytes += _bytes;
4447 packets += _packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004448 }
Alexander Duyck128e45e2009-11-12 18:37:38 +00004449 net_stats->tx_bytes = bytes;
4450 net_stats->tx_packets = packets;
Alexander Duyck3f9c0162009-10-27 23:48:12 +00004451
4452 /* read stats registers */
Auke Kok9d5c8242008-01-24 02:22:38 -08004453 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4454 adapter->stats.gprc += rd32(E1000_GPRC);
4455 adapter->stats.gorc += rd32(E1000_GORCL);
4456 rd32(E1000_GORCH); /* clear GORCL */
4457 adapter->stats.bprc += rd32(E1000_BPRC);
4458 adapter->stats.mprc += rd32(E1000_MPRC);
4459 adapter->stats.roc += rd32(E1000_ROC);
4460
4461 adapter->stats.prc64 += rd32(E1000_PRC64);
4462 adapter->stats.prc127 += rd32(E1000_PRC127);
4463 adapter->stats.prc255 += rd32(E1000_PRC255);
4464 adapter->stats.prc511 += rd32(E1000_PRC511);
4465 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4466 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4467 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4468 adapter->stats.sec += rd32(E1000_SEC);
4469
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004470 mpc = rd32(E1000_MPC);
4471 adapter->stats.mpc += mpc;
4472 net_stats->rx_fifo_errors += mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004473 adapter->stats.scc += rd32(E1000_SCC);
4474 adapter->stats.ecol += rd32(E1000_ECOL);
4475 adapter->stats.mcc += rd32(E1000_MCC);
4476 adapter->stats.latecol += rd32(E1000_LATECOL);
4477 adapter->stats.dc += rd32(E1000_DC);
4478 adapter->stats.rlec += rd32(E1000_RLEC);
4479 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4480 adapter->stats.xontxc += rd32(E1000_XONTXC);
4481 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4482 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4483 adapter->stats.fcruc += rd32(E1000_FCRUC);
4484 adapter->stats.gptc += rd32(E1000_GPTC);
4485 adapter->stats.gotc += rd32(E1000_GOTCL);
4486 rd32(E1000_GOTCH); /* clear GOTCL */
Mitch Williamsfa3d9a62010-03-23 18:34:38 +00004487 adapter->stats.rnbc += rd32(E1000_RNBC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004488 adapter->stats.ruc += rd32(E1000_RUC);
4489 adapter->stats.rfc += rd32(E1000_RFC);
4490 adapter->stats.rjc += rd32(E1000_RJC);
4491 adapter->stats.tor += rd32(E1000_TORH);
4492 adapter->stats.tot += rd32(E1000_TOTH);
4493 adapter->stats.tpr += rd32(E1000_TPR);
4494
4495 adapter->stats.ptc64 += rd32(E1000_PTC64);
4496 adapter->stats.ptc127 += rd32(E1000_PTC127);
4497 adapter->stats.ptc255 += rd32(E1000_PTC255);
4498 adapter->stats.ptc511 += rd32(E1000_PTC511);
4499 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4500 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4501
4502 adapter->stats.mptc += rd32(E1000_MPTC);
4503 adapter->stats.bptc += rd32(E1000_BPTC);
4504
Nick Nunley2d0b0f62010-02-17 01:02:59 +00004505 adapter->stats.tpt += rd32(E1000_TPT);
4506 adapter->stats.colc += rd32(E1000_COLC);
Auke Kok9d5c8242008-01-24 02:22:38 -08004507
4508 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
Nick Nunley43915c7c2010-02-17 01:03:58 +00004509 /* read internal phy specific stats */
4510 reg = rd32(E1000_CTRL_EXT);
4511 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4512 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4513 adapter->stats.tncrs += rd32(E1000_TNCRS);
4514 }
4515
Auke Kok9d5c8242008-01-24 02:22:38 -08004516 adapter->stats.tsctc += rd32(E1000_TSCTC);
4517 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4518
4519 adapter->stats.iac += rd32(E1000_IAC);
4520 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4521 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4522 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4523 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4524 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4525 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4526 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4527 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4528
4529 /* Fill out the OS statistics structure */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004530 net_stats->multicast = adapter->stats.mprc;
4531 net_stats->collisions = adapter->stats.colc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004532
4533 /* Rx Errors */
4534
4535	/* RLEC on some newer hardware can be incorrect, so build
Jesper Dangaard Brouer8c0ab702009-05-26 13:50:31 +00004536 * our own version based on RUC and ROC */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004537 net_stats->rx_errors = adapter->stats.rxerrc +
Auke Kok9d5c8242008-01-24 02:22:38 -08004538 adapter->stats.crcerrs + adapter->stats.algnerrc +
4539 adapter->stats.ruc + adapter->stats.roc +
4540 adapter->stats.cexterr;
Alexander Duyck128e45e2009-11-12 18:37:38 +00004541 net_stats->rx_length_errors = adapter->stats.ruc +
4542 adapter->stats.roc;
4543 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4544 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4545 net_stats->rx_missed_errors = adapter->stats.mpc;
Auke Kok9d5c8242008-01-24 02:22:38 -08004546
4547 /* Tx Errors */
Alexander Duyck128e45e2009-11-12 18:37:38 +00004548 net_stats->tx_errors = adapter->stats.ecol +
4549 adapter->stats.latecol;
4550 net_stats->tx_aborted_errors = adapter->stats.ecol;
4551 net_stats->tx_window_errors = adapter->stats.latecol;
4552 net_stats->tx_carrier_errors = adapter->stats.tncrs;
Auke Kok9d5c8242008-01-24 02:22:38 -08004553
4554 /* Tx Dropped needs to be maintained elsewhere */
4555
4556 /* Phy Stats */
4557 if (hw->phy.media_type == e1000_media_type_copper) {
4558 if ((adapter->link_speed == SPEED_1000) &&
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004559 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
Auke Kok9d5c8242008-01-24 02:22:38 -08004560 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4561 adapter->phy_stats.idle_errors += phy_tmp;
4562 }
4563 }
4564
4565 /* Management Stats */
4566 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4567 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4568 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
Carolyn Wyborny0a915b92011-02-26 07:42:37 +00004569
4570 /* OS2BMC Stats */
4571 reg = rd32(E1000_MANC);
4572 if (reg & E1000_MANC_EN_BMC2OS) {
4573 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4574 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4575 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4576 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4577 }
Auke Kok9d5c8242008-01-24 02:22:38 -08004578}
4579
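/**
 * igb_msix_other - handle the "other causes" MSI-X vector
 * @irq: interrupt number
 * @data: pointer to the adapter structure
 *
 * Services device reset, DMA out-of-sync, VF mailbox and link
 * status change events.
 **/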
Auke Kok9d5c8242008-01-24 02:22:38 -08004580static irqreturn_t igb_msix_other(int irq, void *data)
4581{
Alexander Duyck047e0032009-10-27 15:49:27 +00004582 struct igb_adapter *adapter = data;
Auke Kok9d5c8242008-01-24 02:22:38 -08004583 struct e1000_hw *hw = &adapter->hw;
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004584 u32 icr = rd32(E1000_ICR);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004585 /* reading ICR causes bit 31 of EICR to be cleared */
Alexander Duyckdda0e082009-02-06 23:19:08 +00004586
Alexander Duyck7f081d42010-01-07 17:41:00 +00004587 if (icr & E1000_ICR_DRSTA)
4588 schedule_work(&adapter->reset_task);
4589
Alexander Duyck047e0032009-10-27 15:49:27 +00004590 if (icr & E1000_ICR_DOUTSYNC) {
Alexander Duyckdda0e082009-02-06 23:19:08 +00004591 /* HW is reporting DMA is out of sync */
4592 adapter->stats.doosync++;
Greg Rose13800462010-11-06 02:08:26 +00004593		/* The DMA Out of Sync is also an indication of a spoof event
4594 * in IOV mode. Check the Wrong VM Behavior register to
4595 * see if it is really a spoof event. */
4596 igb_check_wvbr(adapter);
Alexander Duyckdda0e082009-02-06 23:19:08 +00004597 }
Alexander Duyckeebbbdb2009-02-06 23:19:29 +00004598
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004599 /* Check for a mailbox event */
4600 if (icr & E1000_ICR_VMMB)
4601 igb_msg_task(adapter);
4602
4603 if (icr & E1000_ICR_LSC) {
4604 hw->mac.get_link_status = 1;
4605 /* guard against interrupt when we're going down */
4606 if (!test_bit(__IGB_DOWN, &adapter->state))
4607 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4608 }
4609
Alexander Duyck25568a52009-10-27 23:49:59 +00004610 if (adapter->vfs_allocated_count)
4611 wr32(E1000_IMS, E1000_IMS_LSC |
4612 E1000_IMS_VMMB |
4613 E1000_IMS_DOUTSYNC);
4614 else
4615 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
PJ Waskiewicz844290e2008-06-27 11:00:39 -07004616 wr32(E1000_EIMS, adapter->eims_other);
Auke Kok9d5c8242008-01-24 02:22:38 -08004617
4618 return IRQ_HANDLED;
4619}
4620
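/**
 * igb_write_itr - write a pending interrupt throttle rate to the hardware
 * @q_vector: vector whose EITR register is updated
 **/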
Alexander Duyck047e0032009-10-27 15:49:27 +00004621static void igb_write_itr(struct igb_q_vector *q_vector)
Auke Kok9d5c8242008-01-24 02:22:38 -08004622{
Alexander Duyck26b39272010-02-17 01:00:41 +00004623 struct igb_adapter *adapter = q_vector->adapter;
Alexander Duyck047e0032009-10-27 15:49:27 +00004624 u32 itr_val = q_vector->itr_val & 0x7FFC;
Auke Kok9d5c8242008-01-24 02:22:38 -08004625
Alexander Duyck047e0032009-10-27 15:49:27 +00004626 if (!q_vector->set_itr)
4627 return;
Alexander Duyck73cd78f2009-02-12 18:16:59 +00004628
Alexander Duyck047e0032009-10-27 15:49:27 +00004629 if (!itr_val)
4630 itr_val = 0x4;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004631
Alexander Duyck26b39272010-02-17 01:00:41 +00004632 if (adapter->hw.mac.type == e1000_82575)
4633 itr_val |= itr_val << 16;
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004634 else
Alexander Duyck047e0032009-10-27 15:49:27 +00004635 itr_val |= 0x8000000;
4636
4637 writel(itr_val, q_vector->itr_register);
4638 q_vector->set_itr = 0;
4639}
4640
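/**
 * igb_msix_ring - handle a ring's MSI-X vector by scheduling NAPI
 * @irq: interrupt number
 * @data: pointer to the q_vector for this vector
 **/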
4641static irqreturn_t igb_msix_ring(int irq, void *data)
4642{
4643 struct igb_q_vector *q_vector = data;
4644
4645 /* Write the ITR value calculated from the previous interrupt. */
4646 igb_write_itr(q_vector);
4647
4648 napi_schedule(&q_vector->napi);
Peter P Waskiewicz Jr661086d2008-07-08 15:06:51 -07004649
Auke Kok9d5c8242008-01-24 02:22:38 -08004650 return IRQ_HANDLED;
4651}
4652
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004653#ifdef CONFIG_IGB_DCA
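/**
 * igb_update_dca - retarget DCA tags to the CPU the vector now runs on
 * @q_vector: vector whose tx/rx rings are reprogrammed
 **/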
Alexander Duyck047e0032009-10-27 15:49:27 +00004654static void igb_update_dca(struct igb_q_vector *q_vector)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004655{
Alexander Duyck047e0032009-10-27 15:49:27 +00004656 struct igb_adapter *adapter = q_vector->adapter;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004657 struct e1000_hw *hw = &adapter->hw;
4658 int cpu = get_cpu();
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004659
Alexander Duyck047e0032009-10-27 15:49:27 +00004660 if (q_vector->cpu == cpu)
4661 goto out_no_update;
4662
4663 if (q_vector->tx_ring) {
4664 int q = q_vector->tx_ring->reg_idx;
4665 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4666 if (hw->mac.type == e1000_82575) {
4667 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4668 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4669 } else {
4670 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4671 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4672 E1000_DCA_TXCTRL_CPUID_SHIFT;
4673 }
4674 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4675 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4676 }
4677 if (q_vector->rx_ring) {
4678 int q = q_vector->rx_ring->reg_idx;
4679 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4680 if (hw->mac.type == e1000_82575) {
4681 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4682 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4683 } else {
Alexander Duyck2d064c02008-07-08 15:10:12 -07004684 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
Maciej Sosnowski92be7912009-03-13 20:40:21 +00004685 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
Alexander Duyck2d064c02008-07-08 15:10:12 -07004686 E1000_DCA_RXCTRL_CPUID_SHIFT;
Alexander Duyck2d064c02008-07-08 15:10:12 -07004687 }
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004688 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4689 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4690 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4691 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004692 }
Alexander Duyck047e0032009-10-27 15:49:27 +00004693 q_vector->cpu = cpu;
4694out_no_update:
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004695 put_cpu();
4696}
4697
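/**
 * igb_setup_dca - enable CB2 DCA mode and tag all queue vectors
 * @adapter: board private structure
 **/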
4698static void igb_setup_dca(struct igb_adapter *adapter)
4699{
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004700 struct e1000_hw *hw = &adapter->hw;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004701 int i;
4702
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004703 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004704 return;
4705
Alexander Duyck7e0e99e2009-05-21 13:06:56 +00004706 /* Always use CB2 mode, difference is masked in the CB driver. */
4707 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4708
Alexander Duyck047e0032009-10-27 15:49:27 +00004709 for (i = 0; i < adapter->num_q_vectors; i++) {
Alexander Duyck26b39272010-02-17 01:00:41 +00004710 adapter->q_vector[i]->cpu = -1;
4711 igb_update_dca(adapter->q_vector[i]);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004712 }
4713}
4714
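/**
 * __igb_notify_dca - react to a DCA provider being added or removed
 * @dev: device owning the netdev
 * @data: pointer to the DCA event
 **/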
4715static int __igb_notify_dca(struct device *dev, void *data)
4716{
4717 struct net_device *netdev = dev_get_drvdata(dev);
4718 struct igb_adapter *adapter = netdev_priv(netdev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004719 struct pci_dev *pdev = adapter->pdev;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004720 struct e1000_hw *hw = &adapter->hw;
4721 unsigned long event = *(unsigned long *)data;
4722
4723 switch (event) {
4724 case DCA_PROVIDER_ADD:
4725 /* if already enabled, don't do it again */
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004726 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004727 break;
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004728 if (dca_add_requester(dev) == 0) {
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004729 adapter->flags |= IGB_FLAG_DCA_ENABLED;
Alexander Duyck090b1792009-10-27 23:51:55 +00004730 dev_info(&pdev->dev, "DCA enabled\n");
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004731 igb_setup_dca(adapter);
4732 break;
4733 }
4734 /* Fall Through since DCA is disabled. */
4735 case DCA_PROVIDER_REMOVE:
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004736 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004737			/* without this, a class_device is left
Alexander Duyck047e0032009-10-27 15:49:27 +00004738 * hanging around in the sysfs model */
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004739 dca_remove_requester(dev);
Alexander Duyck090b1792009-10-27 23:51:55 +00004740 dev_info(&pdev->dev, "DCA disabled\n");
Alexander Duyck7dfc16f2008-07-08 15:10:46 -07004741 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
Alexander Duyckcbd347a2009-02-15 23:59:44 -08004742 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004743 }
4744 break;
4745 }
Alexander Duyckbbd98fe2009-01-31 00:52:30 -08004746
Jeb Cramerfe4506b2008-07-08 15:07:55 -07004747 return 0;
4748}
4749
4750static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4751 void *p)
4752{
4753 int ret_val;
4754
4755 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4756 __igb_notify_dca);
4757
4758 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4759}
Jeff Kirsher421e02f2008-10-17 11:08:31 -07004760#endif /* CONFIG_IGB_DCA */
Auke Kok9d5c8242008-01-24 02:22:38 -08004761
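/**
 * igb_ping_all_vfs - send a control message to every allocated VF
 * @adapter: board private structure
 **/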
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004762static void igb_ping_all_vfs(struct igb_adapter *adapter)
4763{
4764 struct e1000_hw *hw = &adapter->hw;
4765 u32 ping;
4766 int i;
4767
4768 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4769 ping = E1000_PF_CONTROL_MSG;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00004770 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004771 ping |= E1000_VT_MSGTYPE_CTS;
4772 igb_write_mbx(hw, &ping, 1, i);
4773 }
4774}
4775
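/**
 * igb_set_vf_promisc - handle a VF request to change promiscuous mode
 * @adapter: board private structure
 * @msgbuf: mailbox message from the VF
 * @vf: VF number
 **/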
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004776static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4777{
4778 struct e1000_hw *hw = &adapter->hw;
4779 u32 vmolr = rd32(E1000_VMOLR(vf));
4780 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4781
Alexander Duyckd85b90042010-09-22 17:56:20 +00004782 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004783 IGB_VF_FLAG_MULTI_PROMISC);
4784 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4785
4786 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4787 vmolr |= E1000_VMOLR_MPME;
Alexander Duyckd85b90042010-09-22 17:56:20 +00004788 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004789 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4790 } else {
4791 /*
4792 * if we have hashes and we are clearing a multicast promisc
4793 * flag we need to write the hashes to the MTA as this step
4794 * was previously skipped
4795 */
4796 if (vf_data->num_vf_mc_hashes > 30) {
4797 vmolr |= E1000_VMOLR_MPME;
4798 } else if (vf_data->num_vf_mc_hashes) {
4799 int j;
4800 vmolr |= E1000_VMOLR_ROMPE;
4801 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4802 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4803 }
4804 }
4805
4806 wr32(E1000_VMOLR(vf), vmolr);
4807
4808	/* if there are flags left unprocessed, they are likely not supported */
4809 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4810 return -EINVAL;
4811
4812 return 0;
4813
4814}
4815
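/**
 * igb_set_vf_multicasts - store a VF's multicast hash list
 * @adapter: board private structure
 * @msgbuf: mailbox message holding up to 30 hash values
 * @vf: VF number
 **/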
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004816static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4817 u32 *msgbuf, u32 vf)
4818{
4819 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4820 u16 *hash_list = (u16 *)&msgbuf[1];
4821 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4822 int i;
4823
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004824 /* salt away the number of multicast addresses assigned
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004825	 * to this VF for later use to restore when the PF multicast
4826 * list changes
4827 */
4828 vf_data->num_vf_mc_hashes = n;
4829
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004830 /* only up to 30 hash values supported */
4831 if (n > 30)
4832 n = 30;
4833
4834 /* store the hashes for later use */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004835 for (i = 0; i < n; i++)
Joe Perchesa419aef2009-08-18 11:18:35 -07004836 vf_data->vf_mc_hashes[i] = hash_list[i];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004837
4838 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00004839 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004840
4841 return 0;
4842}
4843
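/**
 * igb_restore_vf_multicasts - replay stored VF multicast hashes into the MTA
 * @adapter: board private structure
 **/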
4844static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4845{
4846 struct e1000_hw *hw = &adapter->hw;
4847 struct vf_data_storage *vf_data;
4848 int i, j;
4849
4850 for (i = 0; i < adapter->vfs_allocated_count; i++) {
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004851 u32 vmolr = rd32(E1000_VMOLR(i));
4852 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4853
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004854 vf_data = &adapter->vf_data[i];
Alexander Duyck7d5753f2009-10-27 23:47:16 +00004855
4856 if ((vf_data->num_vf_mc_hashes > 30) ||
4857 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4858 vmolr |= E1000_VMOLR_MPME;
4859 } else if (vf_data->num_vf_mc_hashes) {
4860 vmolr |= E1000_VMOLR_ROMPE;
4861 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4862 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4863 }
4864 wr32(E1000_VMOLR(i), vmolr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004865 }
4866}
4867
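/**
 * igb_clear_vf_vfta - remove a VF from every VLAN filter pool it belongs to
 * @adapter: board private structure
 * @vf: VF number
 **/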
4868static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4869{
4870 struct e1000_hw *hw = &adapter->hw;
4871 u32 pool_mask, reg, vid;
4872 int i;
4873
4874 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4875
4876 /* Find the vlan filter for this id */
4877 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4878 reg = rd32(E1000_VLVF(i));
4879
4880 /* remove the vf from the pool */
4881 reg &= ~pool_mask;
4882
4883 /* if pool is empty then remove entry from vfta */
4884 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4885 (reg & E1000_VLVF_VLANID_ENABLE)) {
4886			vid = reg & E1000_VLVF_VLANID_MASK;
4887			reg = 0;
4888 igb_vfta_set(hw, vid, false);
4889 }
4890
4891 wr32(E1000_VLVF(i), reg);
4892 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004893
4894 adapter->vf_data[vf].vlans_enabled = 0;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004895}
4896
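/**
 * igb_vlvf_set - add or remove a VLAN filter on behalf of a pool
 * @adapter: board private structure
 * @vid: VLAN id
 * @add: true to add the filter, false to remove it
 * @vf: pool/VF number
 **/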
4897static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4898{
4899 struct e1000_hw *hw = &adapter->hw;
4900 u32 reg, i;
4901
Alexander Duyck51466232009-10-27 23:47:35 +00004902 /* The vlvf table only exists on 82576 hardware and newer */
4903 if (hw->mac.type < e1000_82576)
4904 return -1;
4905
4906 /* we only need to do this if VMDq is enabled */
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004907 if (!adapter->vfs_allocated_count)
4908 return -1;
4909
4910 /* Find the vlan filter for this id */
4911 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4912 reg = rd32(E1000_VLVF(i));
4913 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4914 vid == (reg & E1000_VLVF_VLANID_MASK))
4915 break;
4916 }
4917
4918 if (add) {
4919 if (i == E1000_VLVF_ARRAY_SIZE) {
4920 /* Did not find a matching VLAN ID entry that was
4921 * enabled. Search for a free filter entry, i.e.
4922 * one without the enable bit set
4923 */
4924 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4925 reg = rd32(E1000_VLVF(i));
4926 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4927 break;
4928 }
4929 }
4930 if (i < E1000_VLVF_ARRAY_SIZE) {
4931 /* Found an enabled/available entry */
4932 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4933
4934 /* if !enabled we need to set this up in vfta */
4935 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
Alexander Duyck51466232009-10-27 23:47:35 +00004936 /* add VID to filter table */
4937 igb_vfta_set(hw, vid, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004938 reg |= E1000_VLVF_VLANID_ENABLE;
4939 }
Alexander Duyckcad6d052009-03-13 20:41:37 +00004940 reg &= ~E1000_VLVF_VLANID_MASK;
4941 reg |= vid;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004942 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004943
4944 /* do not modify RLPML for PF devices */
4945 if (vf >= adapter->vfs_allocated_count)
4946 return 0;
4947
4948 if (!adapter->vf_data[vf].vlans_enabled) {
4949 u32 size;
4950 reg = rd32(E1000_VMOLR(vf));
4951 size = reg & E1000_VMOLR_RLPML_MASK;
4952 size += 4;
4953 reg &= ~E1000_VMOLR_RLPML_MASK;
4954 reg |= size;
4955 wr32(E1000_VMOLR(vf), reg);
4956 }
Alexander Duyckae641bd2009-09-03 14:49:33 +00004957
Alexander Duyck51466232009-10-27 23:47:35 +00004958 adapter->vf_data[vf].vlans_enabled++;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004959 return 0;
4960 }
4961 } else {
4962 if (i < E1000_VLVF_ARRAY_SIZE) {
4963 /* remove vf from the pool */
4964 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4965 /* if pool is empty then remove entry from vfta */
4966 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4967 reg = 0;
4968 igb_vfta_set(hw, vid, false);
4969 }
4970 wr32(E1000_VLVF(i), reg);
Alexander Duyckae641bd2009-09-03 14:49:33 +00004971
4972 /* do not modify RLPML for PF devices */
4973 if (vf >= adapter->vfs_allocated_count)
4974 return 0;
4975
4976 adapter->vf_data[vf].vlans_enabled--;
4977 if (!adapter->vf_data[vf].vlans_enabled) {
4978 u32 size;
4979 reg = rd32(E1000_VMOLR(vf));
4980 size = reg & E1000_VMOLR_RLPML_MASK;
4981 size -= 4;
4982 reg &= ~E1000_VMOLR_RLPML_MASK;
4983 reg |= size;
4984 wr32(E1000_VMOLR(vf), reg);
4985 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08004986 }
4987 }
Williams, Mitch A8151d292010-02-10 01:44:24 +00004988 return 0;
4989}
4990
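/**
 * igb_set_vmvir - set or clear the default port VLAN for a VF
 * @adapter: board private structure
 * @vid: VLAN id, 0 to clear
 * @vf: VF number
 **/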
4991static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4992{
4993 struct e1000_hw *hw = &adapter->hw;
4994
4995 if (vid)
4996 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
4997 else
4998 wr32(E1000_VMVIR(vf), 0);
4999}
5000
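/**
 * igb_ndo_set_vf_vlan - administratively set a VF port VLAN and QoS
 * @netdev: network interface device structure
 * @vf: VF number
 * @vlan: VLAN id, 0 to remove the port VLAN
 * @qos: priority for the port VLAN
 **/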
5001static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5002 int vf, u16 vlan, u8 qos)
5003{
5004 int err = 0;
5005 struct igb_adapter *adapter = netdev_priv(netdev);
5006
5007 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5008 return -EINVAL;
5009 if (vlan || qos) {
5010 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5011 if (err)
5012 goto out;
5013 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5014 igb_set_vmolr(adapter, vf, !vlan);
5015 adapter->vf_data[vf].pf_vlan = vlan;
5016 adapter->vf_data[vf].pf_qos = qos;
5017 dev_info(&adapter->pdev->dev,
5018 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5019 if (test_bit(__IGB_DOWN, &adapter->state)) {
5020 dev_warn(&adapter->pdev->dev,
5021 "The VF VLAN has been set,"
5022 " but the PF device is not up.\n");
5023 dev_warn(&adapter->pdev->dev,
5024 "Bring the PF device up before"
5025 " attempting to use the VF device.\n");
5026 }
5027 } else {
5028 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5029 false, vf);
5030 igb_set_vmvir(adapter, vlan, vf);
5031 igb_set_vmolr(adapter, vf, true);
5032 adapter->vf_data[vf].pf_vlan = 0;
5033 adapter->vf_data[vf].pf_qos = 0;
5034 }
5035out:
5036 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005037}
5038
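/**
 * igb_set_vf_vlan - handle a VF mailbox request to add or drop a VLAN
 * @adapter: board private structure
 * @msgbuf: mailbox message from the VF
 * @vf: VF number
 **/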
5039static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5040{
5041 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5042 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5043
5044 return igb_vlvf_set(adapter, vid, add, vf);
5045}
5046
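/**
 * igb_vf_reset - restore a VF's offloads, VLANs and multicast state
 * @adapter: board private structure
 * @vf: VF number
 **/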
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005047static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005048{
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005049	/* clear flags, except the one that indicates the PF has set the MAC */
5050 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005051 adapter->vf_data[vf].last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005052
5053 /* reset offloads to defaults */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005054 igb_set_vmolr(adapter, vf, true);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005055
5056 /* reset vlans for device */
5057 igb_clear_vf_vfta(adapter, vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005058 if (adapter->vf_data[vf].pf_vlan)
5059 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5060 adapter->vf_data[vf].pf_vlan,
5061 adapter->vf_data[vf].pf_qos);
5062 else
5063 igb_clear_vf_vfta(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005064
5065 /* reset multicast table array for vf */
5066 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5067
5068 /* Flush and reset the mta with the new values */
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005069 igb_set_rx_mode(adapter->netdev);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005070}
5071
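/**
 * igb_vf_reset_event - handle a VF reset request, regenerating its MAC
 * address if the PF did not administratively assign one
 * @adapter: board private structure
 * @vf: VF number
 **/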
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005072static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5073{
5074 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5075
5076 /* generate a new mac address as we were hotplug removed/added */
Williams, Mitch A8151d292010-02-10 01:44:24 +00005077 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5078 random_ether_addr(vf_mac);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005079
5080 /* process remaining reset events */
5081 igb_vf_reset(adapter, vf);
5082}
5083
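/**
 * igb_vf_reset_msg - finish a VF reset and ack it with the VF MAC address
 * @adapter: board private structure
 * @vf: VF number
 **/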
5084static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005085{
5086 struct e1000_hw *hw = &adapter->hw;
5087 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
Alexander Duyckff41f8d2009-09-03 14:48:56 +00005088 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005089 u32 reg, msgbuf[3];
5090 u8 *addr = (u8 *)(&msgbuf[1]);
5091
5092 /* process all the same items cleared in a function level reset */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005093 igb_vf_reset(adapter, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005094
5095 /* set vf mac address */
Alexander Duyck26ad9172009-10-05 06:32:49 +00005096 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005097
5098 /* enable transmit and receive for vf */
5099 reg = rd32(E1000_VFTE);
5100 wr32(E1000_VFTE, reg | (1 << vf));
5101 reg = rd32(E1000_VFRE);
5102 wr32(E1000_VFRE, reg | (1 << vf));
5103
Greg Rose8fa7e0f2010-11-06 05:43:21 +00005104 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005105
5106 /* reply to reset with ack and vf mac address */
5107 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5108 memcpy(addr, vf_mac, 6);
5109 igb_write_mbx(hw, msgbuf, 3, vf);
5110}
5111
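/**
 * igb_set_vf_mac_addr - apply a MAC address requested by a VF
 * @adapter: board private structure
 * @msg: mailbox message carrying the address
 * @vf: VF number
 **/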
5112static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5113{
Greg Rosede42edd2010-07-01 13:39:23 +00005114 /*
5115 * The VF MAC Address is stored in a packed array of bytes
5116 * starting at the second 32 bit word of the msg array
5117 */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005118	unsigned char *addr = (unsigned char *)&msg[1];
5119 int err = -1;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005120
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005121 if (is_valid_ether_addr(addr))
5122 err = igb_set_vf_mac(adapter, vf, addr);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005123
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005124 return err;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005125}
5126
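/**
 * igb_rcv_ack_from_vf - NACK a VF that acks while it is not clear to send
 * @adapter: board private structure
 * @vf: VF number
 **/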
5127static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5128{
5129 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005130 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005131 u32 msg = E1000_VT_MSGTYPE_NACK;
5132
5133 /* if device isn't clear to send it shouldn't be reading either */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005134 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5135 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005136 igb_write_mbx(hw, &msg, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005137 vf_data->last_nack = jiffies;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005138 }
5139}
5140
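/**
 * igb_rcv_msg_from_vf - read and dispatch one mailbox message from a VF
 * @adapter: board private structure
 * @vf: VF number
 **/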
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005141static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005142{
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005143 struct pci_dev *pdev = adapter->pdev;
5144 u32 msgbuf[E1000_VFMAILBOX_SIZE];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005145 struct e1000_hw *hw = &adapter->hw;
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005146 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005147 s32 retval;
5148
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005149 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005150
Alexander Duyckfef45f42009-12-11 22:57:34 -08005151 if (retval) {
5152		/* if receive failed, revoke VF CTS status and restart init */
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005153 dev_err(&pdev->dev, "Error receiving message from VF\n");
Alexander Duyckfef45f42009-12-11 22:57:34 -08005154 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5155 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5156 return;
5157 goto out;
5158 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005159
5160 /* this is a message we already processed, do nothing */
5161 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005162 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005163
5164 /*
5165 * until the vf completes a reset it should not be
5166 * allowed to start any configuration.
5167 */
5168
5169 if (msgbuf[0] == E1000_VF_RESET) {
5170 igb_vf_reset_msg(adapter, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005171 return;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005172 }
5173
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005174 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
Alexander Duyckfef45f42009-12-11 22:57:34 -08005175 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5176 return;
5177 retval = -1;
5178 goto out;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005179 }
5180
5181 switch ((msgbuf[0] & 0xFFFF)) {
5182 case E1000_VF_SET_MAC_ADDR:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005183 retval = -EINVAL;
5184 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5185 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5186 else
5187 dev_warn(&pdev->dev,
5188 "VF %d attempted to override administratively "
5189 "set MAC address\nReload the VF driver to "
5190 "resume operations\n", vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005191 break;
Alexander Duyck7d5753f2009-10-27 23:47:16 +00005192 case E1000_VF_SET_PROMISC:
5193 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5194 break;
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005195 case E1000_VF_SET_MULTICAST:
5196 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5197 break;
5198 case E1000_VF_SET_LPE:
5199 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5200 break;
5201 case E1000_VF_SET_VLAN:
Greg Rosea6b5ea32010-11-06 05:42:59 +00005202 retval = -1;
5203 if (vf_data->pf_vlan)
5204 dev_warn(&pdev->dev,
5205 "VF %d attempted to override administratively "
5206 "set VLAN tag\nReload the VF driver to "
5207 "resume operations\n", vf);
Williams, Mitch A8151d292010-02-10 01:44:24 +00005208 else
5209 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005210 break;
5211 default:
Alexander Duyck090b1792009-10-27 23:51:55 +00005212 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005213 retval = -1;
5214 break;
5215 }
5216
Alexander Duyckfef45f42009-12-11 22:57:34 -08005217 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5218out:
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005219 /* notify the VF of the results of what it sent us */
5220 if (retval)
5221 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5222 else
5223 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5224
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005225 igb_write_mbx(hw, msgbuf, 1, vf);
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005226}
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005227
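/**
 * igb_msg_task - service pending resets, messages and acks from all VFs
 * @adapter: board private structure
 **/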
Alexander Duyckf2ca0db2009-10-27 23:46:57 +00005228static void igb_msg_task(struct igb_adapter *adapter)
5229{
5230 struct e1000_hw *hw = &adapter->hw;
5231 u32 vf;
5232
5233 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5234 /* process any reset requests */
5235 if (!igb_check_for_rst(hw, vf))
5236 igb_vf_reset_event(adapter, vf);
5237
5238 /* process any messages pending */
5239 if (!igb_check_for_msg(hw, vf))
5240 igb_rcv_msg_from_vf(adapter, vf);
5241
5242 /* process any acks */
5243 if (!igb_check_for_ack(hw, vf))
5244 igb_rcv_ack_from_vf(adapter, vf);
5245 }
Alexander Duyck4ae196d2009-02-19 20:40:07 -08005246}
5247
/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used,
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
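
/*
 * Illustrative sketch (not part of the driver): with every UTA hash bit set
 * as above, unicast acceptance is effectively gated per pool by the ROPE
 * ("accept packets matched in UTA") bit of that pool's VMOLR register.
 * Assuming the E1000_VMOLR_ROPE bit definition, enabling it for pool @vf
 * would look roughly like:
 *
 *	u32 vmolr = rd32(E1000_VMOLR(vf));
 *	vmolr |= E1000_VMOLR_ROPE;	// accept frames that hit the UTA
 *	wr32(E1000_VMOLR(vf), vmolr);
 *
 * The real driver manages this bit elsewhere, in its Rx-mode and per-pool
 * configuration paths.
 */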

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
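
/*
 * For context: a NAPI poll callback such as igb_poll() only runs after it is
 * registered against a napi_struct. A minimal, hedged sketch of the
 * registration this driver performs at q_vector allocation time (the weight
 * of 64 is the conventional NAPI default):
 *
 *	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
 *
 * After that, napi_schedule(&q_vector->napi) from the interrupt handlers
 * above causes the core to invoke igb_poll() in softirq context with the
 * configured budget.
 */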

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up by
	 * 24 to match the clock shift we set up earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
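
/*
 * Worked example (hedged, using only the constants referenced above): on
 * 82580 a raw TXSTMP/RXSTMP reading counts whole nanoseconds, with 1 ns at
 * bit 0, while the cyclecounter for this MAC is configured with a shift of
 * IGB_82580_TSYNC_SHIFT (24). The raw value therefore has to be moved into
 * the same fixed-point domain before timecounter_cyc2time() scales it back:
 *
 *	u64 raw = 1000;				// 1000 ns from the register
 *	u64 cyc = raw << IGB_82580_TSYNC_SHIFT;	// 1000 * 2^24 "cycles"
 *	// timecounter_cyc2time(&adapter->clock, cyc) then yields ~1000 ns
 *	// (plus the accumulated base time), undoing the 2^24 scaling via
 *	// the cyclecounter's mult/shift pair.
 */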

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb(); /* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (buffer_info->skb) {
				total_bytes += buffer_info->bytecount;
				/* gso_segs is currently only valid for tcp */
				total_packets += buffer_info->gso_segs;
				igb_tx_hwtstamp(q_vector, buffer_info);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	return count < tx_ring->count;
}

/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
			    struct sk_buff *skb,
			    u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag && adapter->vlgrp)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}

static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (staterr & E1000_RXDADV_STAT_TSIP) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
			       union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger. In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}

static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
				 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			dma_unmap_single(dev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			dma_unmap_page(dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
			igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: pointer to the ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (unlikely(!buffer_info->page)) {
					u64_stats_update_begin(&rx_ring->rx_syncp);
					rx_ring->rx_stats.alloc_failed++;
					u64_stats_update_end(&rx_ring->rx_syncp);
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(rx_ring->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				u64_stats_update_begin(&rx_ring->rx_syncp);
				rx_ring->rx_stats.alloc_failed++;
				u64_stats_update_end(&rx_ring->rx_syncp);
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (unlikely(!skb)) {
				u64_stats_update_begin(&rx_ring->rx_syncp);
				rx_ring->rx_stats.alloc_failed++;
				u64_stats_update_end(&rx_ring->rx_syncp);
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_single(rx_ring->dev,
							  skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->dma)) {
				buffer_info->dma = 0;
				u64_stats_update_begin(&rx_ring->rx_syncp);
				rx_ring->rx_stats.alloc_failed++;
				u64_stats_update_end(&rx_ring->rx_syncp);
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - handle the MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the MII data
 * @cmd: MII ioctl command
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
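
/*
 * For reference, the MII ioctls above follow the standard kernel ABI and can
 * be driven from userspace roughly like this (hedged sketch; error handling
 * trimmed, "eth0" is a placeholder interface name):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// link status etc. in mii->val_out
 *
 * Note that the handler above masks reg_num with 0x1F and rejects
 * SIOCSMIIREG, so only the 32 standard registers can be read, none written.
 */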

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the hwtstamp_config
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * layer 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
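
/*
 * For reference, the handler above implements the standard SIOCSHWTSTAMP
 * ABI from <linux/net_tstamp.h>. A hedged userspace sketch enabling TX
 * timestamps plus V2 event RX filtering ("eth0" is a placeholder; error
 * handling trimmed):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	struct hwtstamp_config cfg = {0};
 *	struct ifreq ifr = {0};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter holds what the hardware actually provides; as
 * the switch above shows, several requested filters are widened (e.g. to
 * HWTSTAMP_FILTER_ALL) rather than matched exactly.
 */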

/**
 * igb_ioctl - entry point for the supported ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
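
/*
 * For context: spddplx encodes speed and duplex as a simple sum, matching
 * the switch above; SPEED_* and DUPLEX_* come from <linux/ethtool.h>. A
 * hedged sketch of forcing 100 Mbps full duplex from an ethtool-style
 * caller:
 *
 *	if (!igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL))
 *		igb_reset(adapter);	// restart the link so it takes effect
 *
 * Note that 1000 Mbps cannot actually be forced: the function re-enables
 * autonegotiation and merely restricts the advertised modes.
 */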

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
6467
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

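/**
 * igb_rar_set_qsel - write a MAC address into a receive address register
 * @adapter: board private structure
 * @addr: MAC address, in network (big endian) byte order
 * @index: receive address register (RAL/RAH pair) to program
 * @qsel: pool/queue selector OR'd into the RAH pool bits
 *
 * Worked example of the byte swap below (illustrative address): for
 * addr 00:1b:21:aa:bb:cc, rar_low = 0xaa211b00 and rar_high = 0x0000ccbb,
 * before the AV and pool-select bits are OR'd into rar_high.
 */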
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

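/**
 * igb_set_vf_mac - program the MAC address a VF will use
 * @adapter: board private structure
 * @vf: VF number
 * @mac_addr: the new MAC address, in network byte order
 *
 * Stores the address in the VF's bookkeeping and writes it into the
 * RAR entry reserved for that VF (counted back from the last entry).
 */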
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

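/**
 * igb_ndo_set_vf_mac - ndo_set_vf_mac handler, reached via rtnetlink
 * (e.g. "ip link set <dev> vf <n> mac <addr>")
 * @netdev: network interface device structure
 * @vf: VF number
 * @mac: the MAC address the administrator wants the VF to use
 *
 * Validates the address and VF index, then flags the address as
 * administratively set so the VF cannot override it.
 */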
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev,
		 "Reload the VF driver to make this change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF MAC address has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

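/* Convert an internal SPEED_* link-speed value to a rate in Mbps. */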
static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

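/*
 * igb_set_vf_rate_limit - program a VF's TX rate limiter
 *
 * The limiter divides the line rate by a fixed-point rate factor
 * rf_int.rf_dec. Worked example (illustrative numbers only):
 * link_speed = 1000, tx_rate = 300 gives rf_int = 3 and
 * rf_dec = ((1000 - 3 * 300) << E1000_RTTBCNRC_RF_INT_SHIFT) / 300,
 * i.e. a fractional part of 1/3, so the factor is ~3.33 and the VF
 * is held to roughly 1000 / 3.33 = 300 Mbps. A tx_rate of 0 disables
 * the limiter for that VF.
 */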
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

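/*
 * igb_check_vf_rate_limit - revalidate VF rate limits after a link change
 *
 * Rate factors are computed against the link speed current at the time
 * they were set, so if the link renegotiates, the programmed limits no
 * longer mean what the administrator asked for; in that case every VF's
 * limit is cleared and the limiter reprogrammed.
 */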
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

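/*
 * igb_ndo_set_vf_bw - ndo handler for setting a VF's TX bandwidth limit
 *
 * Only the 82576 supports per-VF rate limiting here; the link must be
 * up and the requested rate must not exceed the current link speed.
 */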
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}

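/*
 * igb_ndo_get_vf_config - report a VF's MAC, VLAN, QoS and TX rate
 * back through rtnetlink (e.g. for "ip link show").
 */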
static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}

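/*
 * igb_vmm_control - configure VM-to-VM traffic handling for the MAC type
 *
 * The switch below falls through deliberately: 82576 needs the DTXCTL
 * VLAN quirk plus the RPLOLR stripping bit, 82580 needs only the latter,
 * and i350 needs neither. Loopback, replication and anti-spoofing are
 * then enabled only when VFs are allocated.
 */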
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */