blob: 222bfaff4622959df30eb7b89f25a2f7764dcc32 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*******************************************************************************
2
Auke Kok0abb6eb2006-09-27 12:53:14 -07003 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 more details.
Auke Kok0abb6eb2006-09-27 12:53:14 -070014
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 You should have received a copy of the GNU General Public License along with
Auke Kok0abb6eb2006-09-27 12:53:14 -070016 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 Contact Information:
23 Linux NICS <linux.nics@intel.com>
Auke Kok3d41e302006-04-14 19:05:31 -070024 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
Andrew Mortond0bb53e2006-11-14 10:35:03 -050030#include <net/ip6_checksum.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000031#include <linux/io.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040032#include <linux/prefetch.h>
Jiri Pirko5622e402011-07-21 03:26:31 +000033#include <linux/bitops.h>
34#include <linux/if_vlan.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000035
Linus Torvalds1da177e2005-04-16 15:20:36 -070036char e1000_driver_name[] = "e1000";
Adrian Bunk3ad2cc62005-10-30 16:53:34 +010037static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
Anupam Chandaab088532010-11-21 09:54:21 -080038#define DRV_VERSION "7.3.21-k8-NAPI"
Stephen Hemmingerabec42a2007-10-29 10:46:19 -070039const char e1000_driver_version[] = DRV_VERSION;
40static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 *
 * Exported to the module loader below via MODULE_DEVICE_TABLE() so
 * userspace can autoload this driver for the matching Intel devices.
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
Nicholas Nunley35574762006-09-27 12:53:34 -070093int e1000_up(struct e1000_adapter *adapter);
94void e1000_down(struct e1000_adapter *adapter);
95void e1000_reinit_locked(struct e1000_adapter *adapter);
96void e1000_reset(struct e1000_adapter *adapter);
Nicholas Nunley35574762006-09-27 12:53:34 -070097int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700102 struct e1000_tx_ring *txdr);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700104 struct e1000_rx_ring *rxdr);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700106 struct e1000_tx_ring *tx_ring);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700108 struct e1000_rx_ring *rx_ring);
109void e1000_update_stats(struct e1000_adapter *adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110
111static int e1000_init_module(void);
112static void e1000_exit_module(void);
113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114static void __devexit e1000_remove(struct pci_dev *pdev);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400115static int e1000_alloc_queues(struct e1000_adapter *adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116static int e1000_sw_init(struct e1000_adapter *adapter);
117static int e1000_open(struct net_device *netdev);
118static int e1000_close(struct net_device *netdev);
119static void e1000_configure_tx(struct e1000_adapter *adapter);
120static void e1000_configure_rx(struct e1000_adapter *adapter);
121static void e1000_setup_rctl(struct e1000_adapter *adapter);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125 struct e1000_tx_ring *tx_ring);
126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127 struct e1000_rx_ring *rx_ring);
Patrick McHardydb0ce502007-11-13 20:54:59 -0800128static void e1000_set_rx_mode(struct net_device *netdev);
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +0000129static void e1000_update_phy_info_task(struct work_struct *work);
Jesse Brandeburga4010af2011-10-05 07:24:41 +0000130static void e1000_watchdog(struct work_struct *work);
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +0000131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
Stephen Hemminger3b29a562009-08-31 19:50:55 +0000132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 struct net_device *netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136static int e1000_set_mac(struct net_device *netdev, void *p);
David Howells7d12e782006-10-05 14:55:46 +0100137static irqreturn_t e1000_intr(int irq, void *data);
Joe Perchesc3033b02008-03-21 11:06:25 -0700138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 struct e1000_tx_ring *tx_ring);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700140static int e1000_clean(struct napi_struct *napi, int budget);
Joe Perchesc3033b02008-03-21 11:06:25 -0700141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 struct e1000_rx_ring *rx_ring,
143 int *work_done, int work_to_do);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 struct e1000_rx_ring *rx_ring,
146 int *work_done, int work_to_do);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400147static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000148 struct e1000_rx_ring *rx_ring,
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800149 int cleaned_count);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000150static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151 struct e1000_rx_ring *rx_ring,
152 int cleaned_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155 int cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158static void e1000_tx_timeout(struct net_device *dev);
David Howells65f27f32006-11-22 14:55:48 +0000159static void e1000_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160static void e1000_smartspeed(struct e1000_adapter *adapter);
Auke Koke619d522006-04-14 19:04:52 -0700161static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162 struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
Jiri Pirko5622e402011-07-21 03:26:31 +0000164static bool e1000_vlan_used(struct e1000_adapter *adapter);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000165static void e1000_vlan_mode(struct net_device *netdev,
166 netdev_features_t features);
Jiri Pirko52f55092012-03-20 18:10:01 +0000167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168 bool filter_on);
Jiri Pirko8e586132011-12-08 19:52:37 -0500169static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
170static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171static void e1000_restore_vlan(struct e1000_adapter *adapter);
172
Auke Kok6fdfef12006-06-27 09:06:36 -0700173#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +0000174static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175static int e1000_resume(struct pci_dev *pdev);
176#endif
Auke Kokc653e632006-05-23 13:35:57 -0700177static void e1000_shutdown(struct pci_dev *pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178
179#ifdef CONFIG_NET_POLL_CONTROLLER
180/* for netdump / net console */
181static void e1000_netpoll (struct net_device *netdev);
182#endif
183
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100184#define COPYBREAK_DEFAULT 256
185static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
186module_param(copybreak, uint, 0644);
187MODULE_PARM_DESC(copybreak,
188 "Maximum size of packet that is copied to a new buffer on receive");
189
Auke Kok90267292006-06-08 09:30:24 -0700190static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
191 pci_channel_state_t state);
192static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
193static void e1000_io_resume(struct pci_dev *pdev);
194
/* PCI error-recovery callbacks (AER): detect, reset the slot, resume */
static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -0400200
/* PCI driver registration: entry points the PCI core invokes for
 * matching devices listed in e1000_pci_tbl */
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
214
215MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
216MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
217MODULE_LICENSE("GPL");
218MODULE_VERSION(DRV_VERSION);
219
stephen hemmingerb3f4d592012-03-13 06:04:20 +0000220#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221static int debug = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222module_param(debug, int, 0);
223MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
224
225/**
Emil Tantilov675ad472010-04-27 14:02:58 +0000226 * e1000_get_hw_dev - return device
227 * used by hardware layer to print debugging information
228 *
229 **/
230struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231{
232 struct e1000_adapter *adapter = hw->back;
233 return adapter->netdev;
234}
235
236/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 * e1000_init_module - Driver Registration Routine
238 *
239 * e1000_init_module is the first routine called when the driver is
240 * loaded. All it does is register with the PCI subsystem.
241 **/
242
Joe Perches64798842008-07-11 15:17:02 -0700243static int __init e1000_init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244{
245 int ret;
Emil Tantilov675ad472010-04-27 14:02:58 +0000246 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247
Emil Tantilov675ad472010-04-27 14:02:58 +0000248 pr_info("%s\n", e1000_copyright);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249
Jeff Garzik29917622006-08-19 17:48:59 -0400250 ret = pci_register_driver(&e1000_driver);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100251 if (copybreak != COPYBREAK_DEFAULT) {
252 if (copybreak == 0)
Emil Tantilov675ad472010-04-27 14:02:58 +0000253 pr_info("copybreak disabled\n");
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100254 else
Emil Tantilov675ad472010-04-27 14:02:58 +0000255 pr_info("copybreak enabled for "
256 "packets <= %u bytes\n", copybreak);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 return ret;
259}
260
261module_init(e1000_init_module);
262
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	/* unregistering triggers ->remove() for every bound device */
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
276
Auke Kok2db10a02006-06-27 09:06:28 -0700277static int e1000_request_irq(struct e1000_adapter *adapter)
278{
279 struct net_device *netdev = adapter->netdev;
Al Viro3e188262007-12-11 19:49:39 +0000280 irq_handler_t handler = e1000_intr;
Auke Koke94bd232007-05-16 01:49:46 -0700281 int irq_flags = IRQF_SHARED;
282 int err;
Auke Kok2db10a02006-06-27 09:06:28 -0700283
Auke Koke94bd232007-05-16 01:49:46 -0700284 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285 netdev);
286 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700287 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
Auke Koke94bd232007-05-16 01:49:46 -0700288 }
Auke Kok2db10a02006-06-27 09:06:28 -0700289
290 return err;
291}
292
/* Release the interrupt line taken in e1000_request_irq(); the
 * net_device pointer must match the cookie passed to request_irq(). */
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}
299
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask every interrupt cause, then flush the posted write */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	/* wait for any in-flight handler on another CPU to finish */
	synchronize_irq(adapter->pdev->irq);
}
313
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* unmask the default cause set and flush the posted write */
	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100326
/* Keep the VLAN filter in sync with the VLAN id requested by the
 * manageability firmware cookie (hw->mng_cookie), adding the new id
 * and retiring the previously tracked one when it is no longer used. */
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	/* nothing to track unless VLAN filtering is in use */
	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			/* firmware wants this VLAN - add it to the filter */
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* drop the old management VLAN if it changed and is not
		 * otherwise active */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
Jeff Kirsherb55ccb32006-01-12 16:50:30 -0800353
Joe Perches64798842008-07-11 15:17:02 -0700354static void e1000_init_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500355{
Joe Perches1dc32912008-07-11 15:17:08 -0700356 struct e1000_hw *hw = &adapter->hw;
357
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500358 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700359 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500360
361 /* disable hardware interception of ARP */
362 manc &= ~(E1000_MANC_ARP_EN);
363
Joe Perches1dc32912008-07-11 15:17:08 -0700364 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500365 }
366}
367
Joe Perches64798842008-07-11 15:17:02 -0700368static void e1000_release_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500369{
Joe Perches1dc32912008-07-11 15:17:08 -0700370 struct e1000_hw *hw = &adapter->hw;
371
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500372 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700373 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500374
375 /* re-enable hardware interception of ARP */
376 manc |= E1000_MANC_ARP_EN;
377
Joe Perches1dc32912008-07-11 15:17:08 -0700378 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500379 }
380}
381
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter = private board structure
 *
 * Order matters here: rx mode, VLAN and manageability state are
 * programmed before the Tx/Rx units, and receive buffers are posted
 * last so the rings are ready before the hardware can DMA into them.
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
Jeff Kirsher7bfa4812006-01-12 16:50:41 -0800408
/**
 * e1000_up - bring the interface back up after a reset
 * @adapter: board private structure
 *
 * Reprograms the hardware, clears the DOWN flag, re-enables NAPI and
 * interrupts, restarts the transmit queue, and finally raises a link
 * change interrupt so the watchdog runs.  Always returns 0.
 **/
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
428
Auke Kok79f05bf2006-06-27 09:06:32 -0700429/**
430 * e1000_power_up_phy - restore link in case the phy was powered down
431 * @adapter: address of board private structure
432 *
433 * The phy may be powered down to save power and turn off link when the
434 * driver is unloaded and wake on lan is not enabled (among others)
435 * *** this routine MUST be followed by a call to e1000_reset ***
436 *
437 **/
438
Jesse Brandeburgd6582662006-08-16 13:31:33 -0700439void e1000_power_up_phy(struct e1000_adapter *adapter)
Auke Kok79f05bf2006-06-27 09:06:32 -0700440{
Joe Perches1dc32912008-07-11 15:17:08 -0700441 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700442 u16 mii_reg = 0;
Auke Kok79f05bf2006-06-27 09:06:32 -0700443
444 /* Just clear the power down bit to wake the phy back up */
Joe Perches1dc32912008-07-11 15:17:08 -0700445 if (hw->media_type == e1000_media_type_copper) {
Auke Kok79f05bf2006-06-27 09:06:32 -0700446 /* according to the manual, the phy will retain its
447 * settings across a power-down/up cycle */
Joe Perches1dc32912008-07-11 15:17:08 -0700448 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700449 mii_reg &= ~MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700450 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700451 }
452}
453
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	   hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		/* only the mac types below may power down; anything else
		 * falls through to 'out' and leaves the PHY powered */
		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			/* NOTE(review): if MANC.SMBUS_EN is set, keep the
			 * PHY up - presumably management firmware is using
			 * the SMBus interface; confirm against the Intel
			 * developer manual */
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		/* set the power-down bit in PHY_CTRL and give the PHY a
		 * moment to act on it */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
492
/* Mark the adapter DOWN and synchronously cancel all deferred work so
 * no task runs after teardown proceeds. */
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
505
/**
 * e1000_down - quiesce the device
 * @adapter: board private structure
 *
 * Disables receive, then transmit, flushes both, stops NAPI and
 * interrupts, cancels deferred work, resets the hardware and frees the
 * ring contents.  The ordering below is deliberate - do not reorder.
 **/
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;


	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547
/* Restart the adapter from contexts that do not hold the rtnl lock:
 * spin until the RESETTING bit is ours, then down/up under the
 * adapter mutex. */
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
558
/* Restart the adapter; caller must hold the rtnl lock.  Serialized
 * against other resets via the RESETTING flag. */
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
570
/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer allocation (PBA) between Rx and Tx
 * FIFOs based on MAC type and current max frame size, programs flow
 * control watermarks, then resets and re-initializes the MAC.  Also
 * re-applies VLAN ethertype and adaptive IFS settings lost by the reset.
 */
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	/* Older MACs get a fixed PBA plus the legacy jumbo adjustment
	 * below; newer ones are repartitioned dynamically. */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		/* 82547 works around a Tx FIFO hang by tracking the FIFO
		 * head in software; seed that state from the chosen PBA. */
		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);	/* clear wake-up control after reset */

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
723
Ben Hutchings1aa8b472012-07-10 10:56:59 +0000724/* Dump the eeprom for users having checksum issues */
Adrian Bunkb4ea8952008-02-01 08:21:28 -0800725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
Auke Kok67b3c272007-12-17 13:50:23 -0800726{
727 struct net_device *netdev = adapter->netdev;
728 struct ethtool_eeprom eeprom;
729 const struct ethtool_ops *ops = netdev->ethtool_ops;
730 u8 *data;
731 int i;
732 u16 csum_old, csum_new = 0;
733
734 eeprom.len = ops->get_eeprom_len(netdev);
735 eeprom.offset = 0;
736
737 data = kmalloc(eeprom.len, GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +0000738 if (!data)
Auke Kok67b3c272007-12-17 13:50:23 -0800739 return;
Auke Kok67b3c272007-12-17 13:50:23 -0800740
741 ops->get_eeprom(netdev, &eeprom, data);
742
743 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
744 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
745 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
746 csum_new += data[i] + (data[i + 1] << 8);
747 csum_new = EEPROM_SUM - csum_new;
748
Emil Tantilov675ad472010-04-27 14:02:58 +0000749 pr_err("/*********************/\n");
750 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
751 pr_err("Calculated : 0x%04x\n", csum_new);
Auke Kok67b3c272007-12-17 13:50:23 -0800752
Emil Tantilov675ad472010-04-27 14:02:58 +0000753 pr_err("Offset Values\n");
754 pr_err("======== ======\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800755 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
756
Emil Tantilov675ad472010-04-27 14:02:58 +0000757 pr_err("Include this output when contacting your support provider.\n");
758 pr_err("This is not a software error! Something bad happened to\n");
759 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
760 pr_err("result in further problems, possibly loss of data,\n");
761 pr_err("corruption or system hangs!\n");
762 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
763 pr_err("which is invalid and requires you to set the proper MAC\n");
764 pr_err("address manually before continuing to enable this network\n");
765 pr_err("device. Please inspect the EEPROM dump and report the\n");
766 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
767 pr_err("/*********************/\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800768
769 kfree(data);
770}
771
772/**
Taku Izumi81250292008-07-11 15:17:44 -0700773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
774 * @pdev: PCI device information struct
775 *
776 * Return true if an adapter needs ioport resources
777 **/
778static int e1000_is_need_ioport(struct pci_dev *pdev)
779{
780 switch (pdev->device) {
781 case E1000_DEV_ID_82540EM:
782 case E1000_DEV_ID_82540EM_LOM:
783 case E1000_DEV_ID_82540EP:
784 case E1000_DEV_ID_82540EP_LOM:
785 case E1000_DEV_ID_82540EP_LP:
786 case E1000_DEV_ID_82541EI:
787 case E1000_DEV_ID_82541EI_MOBILE:
788 case E1000_DEV_ID_82541ER:
789 case E1000_DEV_ID_82541ER_LOM:
790 case E1000_DEV_ID_82541GI:
791 case E1000_DEV_ID_82541GI_LF:
792 case E1000_DEV_ID_82541GI_MOBILE:
793 case E1000_DEV_ID_82544EI_COPPER:
794 case E1000_DEV_ID_82544EI_FIBER:
795 case E1000_DEV_ID_82544GC_COPPER:
796 case E1000_DEV_ID_82544GC_LOM:
797 case E1000_DEV_ID_82545EM_COPPER:
798 case E1000_DEV_ID_82545EM_FIBER:
799 case E1000_DEV_ID_82546EB_COPPER:
800 case E1000_DEV_ID_82546EB_FIBER:
801 case E1000_DEV_ID_82546EB_QUAD_COPPER:
802 return true;
803 default:
804 return false;
805 }
806}
807
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000808static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +0000810{
811 /*
812 * Since there is no support for separate rx/tx vlan accel
813 * enable/disable make sure tx flag is always in same state as rx.
814 */
815 if (features & NETIF_F_HW_VLAN_RX)
816 features |= NETIF_F_HW_VLAN_TX;
817 else
818 features &= ~NETIF_F_HW_VLAN_TX;
819
820 return features;
821}
822
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000823static int e1000_set_features(struct net_device *netdev,
824 netdev_features_t features)
Michał Mirosławe97d3202011-06-08 08:36:42 +0000825{
826 struct e1000_adapter *adapter = netdev_priv(netdev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000827 netdev_features_t changed = features ^ netdev->features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000828
Jiri Pirko5622e402011-07-21 03:26:31 +0000829 if (changed & NETIF_F_HW_VLAN_RX)
830 e1000_vlan_mode(netdev, features);
831
Ben Greeare825b732012-04-04 06:01:29 +0000832 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
Michał Mirosławe97d3202011-06-08 08:36:42 +0000833 return 0;
834
Ben Greeare825b732012-04-04 06:01:29 +0000835 netdev->features = features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000836 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
837
838 if (netif_running(netdev))
839 e1000_reinit_locked(adapter);
840 else
841 e1000_reset(adapter);
842
843 return 0;
844}
845
/* Callbacks through which the network core drives this device; see
 * struct net_device_ops in <linux/netdevice.h> for each hook's contract.
 */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
865
Taku Izumi81250292008-07-11 15:17:44 -0700866/**
Jesse Brandeburge508be12010-09-07 21:01:12 +0000867 * e1000_init_hw_struct - initialize members of hw struct
868 * @adapter: board private struct
869 * @hw: structure used by e1000_hw.c
870 *
871 * Factors out initialization of the e1000_hw struct to its own function
872 * that can be called very early at init (just after struct allocation).
873 * Fields are initialized based on PCI device information and
874 * OS network device settings (MTU size).
875 * Returns negative error codes if MAC type setup fails.
876 */
877static int e1000_init_hw_struct(struct e1000_adapter *adapter,
878 struct e1000_hw *hw)
879{
880 struct pci_dev *pdev = adapter->pdev;
881
882 /* PCI config space info */
883 hw->vendor_id = pdev->vendor;
884 hw->device_id = pdev->device;
885 hw->subsystem_vendor_id = pdev->subsystem_vendor;
886 hw->subsystem_id = pdev->subsystem_device;
887 hw->revision_id = pdev->revision;
888
889 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
890
891 hw->max_frame_size = adapter->netdev->mtu +
892 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
893 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
894
895 /* identify the MAC */
896 if (e1000_set_mac_type(hw)) {
897 e_err(probe, "Unknown MAC Type\n");
898 return -EIO;
899 }
900
901 switch (hw->mac_type) {
902 default:
903 break;
904 case e1000_82541:
905 case e1000_82547:
906 case e1000_82541_rev_2:
907 case e1000_82547_rev_2:
908 hw->phy_init_script = 1;
909 break;
910 }
911
912 e1000_set_media_type(hw);
913 e1000_get_bus_info(hw);
914
915 hw->wait_autoneg_complete = false;
916 hw->tbi_compatibility_en = true;
917 hw->adaptive_ifs = true;
918
919 /* Copper options */
920
921 if (hw->media_type == e1000_media_type_copper) {
922 hw->mdix = AUTO_ALL_MODES;
923 hw->disable_polarity_correction = false;
924 hw->master_slave = E1000_MASTER_SLAVE;
925 }
926
927 return 0;
928}
929
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	/* saved config space is restored on resume/error recovery */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* wire up netdev <-> adapter <-> pdev back-pointers */
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	/* map the register BAR (BAR 0) */
	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	/* find the first I/O port BAR, if this part needs one */
	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	/* ce4100 exposes its MDIO registers through a second BAR */
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	/* advertise offloads by MAC capability */
	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporary
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initalization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");


	/* deferred-work handlers; armed later, canceled in e1000_remove() */
	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			/* 0x0000/0xFF mean no PHY at this address; give up
			 * after the last address */
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

/* error unwinding: labels release, in reverse order, everything acquired
 * before the jump */
err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	/* NOTE(review): for non-ce4100 parts (and err_dma/err_sw_init paths)
	 * ce4100_gbe_mdio_base_virt was never mapped; this relies on the
	 * zeroed netdev priv area and iounmap(NULL) being harmless — confirm */
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
1265
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.  Tears down everything acquired in e1000_probe(), in
 * reverse order.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* stop traffic and cancel all deferred work before unregistering */
	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* the MDIO BAR is only mapped on ce4100 parts */
	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
1303
1304/**
1305 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1306 * @adapter: board private structure to initialize
1307 *
1308 * e1000_sw_init initializes the Adapter private data structure.
Jesse Brandeburge508be12010-09-07 21:01:12 +00001309 * e1000_init_hw_struct MUST be called before this function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 **/
1311
Joe Perches64798842008-07-11 15:17:02 -07001312static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313{
Auke Kokeb0f8052006-07-14 16:14:48 -07001314 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001316 adapter->num_tx_queues = 1;
1317 adapter->num_rx_queues = 1;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001318
1319 if (e1000_alloc_queues(adapter)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001320 e_err(probe, "Unable to allocate memory for queues\n");
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001321 return -ENOMEM;
1322 }
1323
Herbert Xu47313052007-05-29 15:07:31 -07001324 /* Explicitly disable IRQ since the NIC can be in any state. */
Herbert Xu47313052007-05-29 15:07:31 -07001325 e1000_irq_disable(adapter);
1326
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 spin_lock_init(&adapter->stats_lock);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00001328 mutex_init(&adapter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
Auke Kok1314bbf2006-09-27 12:54:02 -07001330 set_bit(__E1000_DOWN, &adapter->flags);
1331
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 return 0;
1333}
1334
1335/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001336 * e1000_alloc_queues - Allocate memory for all rings
1337 * @adapter: board private structure to initialize
1338 *
1339 * We allocate one ring per queue at run-time since we don't know the
Wang Chen3e1d7cd2008-12-03 22:07:10 -08001340 * number of queues at compile-time.
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001341 **/
1342
Joe Perches64798842008-07-11 15:17:02 -07001343static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001344{
Yan Burman1c7e5b12007-03-06 08:58:04 -08001345 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1346 sizeof(struct e1000_tx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001347 if (!adapter->tx_ring)
1348 return -ENOMEM;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001349
Yan Burman1c7e5b12007-03-06 08:58:04 -08001350 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1351 sizeof(struct e1000_rx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001352 if (!adapter->rx_ring) {
1353 kfree(adapter->tx_ring);
1354 return -ENOMEM;
1355 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001356
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001357 return E1000_SUCCESS;
1358}
1359
1360/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 * e1000_open - Called when a network interface is made active
1362 * @netdev: network interface device structure
1363 *
1364 * Returns 0 on success, negative value on failure
1365 *
1366 * The open entry point is called when a network interface is made
1367 * active by the system (IFF_UP). At this point all resources needed
1368 * for transmit and receive operations are allocated, the interrupt
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001369 * handler is registered with the OS, the watchdog task is started,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 * and the stack is notified that the interface is ready.
1371 **/
1372
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* carrier state is unknown until the first link interrupt */
	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	/* pick up the management VLAN, if the firmware cookie asks for one */
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* error unwind: release in reverse order of acquisition */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1437
1438/**
1439 * e1000_close - Disables a network interface
1440 * @netdev: network interface device structure
1441 *
1442 * Returns 0, this is not allowed to fail
1443 *
1444 * The close entry point is called when an interface is de-activated
1445 * by the OS. The hardware is still under the drivers control, but
1446 * needs to be disabled. A global MAC reset is issued to stop the
1447 * hardware, and all transmit and receive resources are freed.
1448 **/
1449
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* close must not race with an in-progress reset */
	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}
1473
1474/**
1475 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1476 * @adapter: address of board private structure
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001477 * @start: address of beginning of memory
1478 * @len: length of memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 **/
Joe Perches64798842008-07-11 15:17:02 -07001480static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1481 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482{
Joe Perches1dc32912008-07-11 15:17:08 -07001483 struct e1000_hw *hw = &adapter->hw;
Joe Perchese982f172008-07-11 15:17:18 -07001484 unsigned long begin = (unsigned long)start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 unsigned long end = begin + len;
1486
Malli Chilakala26483452005-04-28 19:44:46 -07001487 /* First rev 82545 and 82546 need to not allow any memory
1488 * write location to cross 64k boundary due to errata 23 */
Joe Perches1dc32912008-07-11 15:17:08 -07001489 if (hw->mac_type == e1000_82545 ||
Dirk Brandewie5377a412011-01-06 14:29:54 +00001490 hw->mac_type == e1000_ce4100 ||
Joe Perches1dc32912008-07-11 15:17:08 -07001491 hw->mac_type == e1000_82546) {
Joe Perchesc3033b02008-03-21 11:06:25 -07001492 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 }
1494
Joe Perchesc3033b02008-03-21 11:06:25 -07001495 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496}
1497
1498/**
1499 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1500 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001501 * @txdr: tx descriptor ring (for a specific queue) to setup
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 *
1503 * Return 0 on success, negative on failure
1504 **/
1505
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	/* per-descriptor software bookkeeping, zeroed */
	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		/* shared failure exit: also reached from the retry below */
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous: holding on to the
		 * misaligned buffer forces the allocator to hand back a
		 * different (hopefully aligned) region */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up: free both attempts and bail out */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	/* descriptors must start out zeroed for the hardware */
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1574
1575/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001576 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1577 * (Descriptors) for all queues
1578 * @adapter: board private structure
1579 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001580 * Return 0 on success, negative on failure
1581 **/
1582
Joe Perches64798842008-07-11 15:17:02 -07001583int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001584{
1585 int i, err = 0;
1586
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001587 for (i = 0; i < adapter->num_tx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001588 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1589 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001590 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001591 for (i-- ; i >= 0; i--)
1592 e1000_free_tx_resources(adapter,
1593 &adapter->tx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001594 break;
1595 }
1596 }
1597
1598 return err;
1599}
1600
1601/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1603 * @adapter: board private structure
1604 *
1605 * Configure the Tx unit of the MAC after a reset.
1606 **/
1607
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		/* program ring base, length, and reset head/tail to 0 */
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* 82542 uses different register offsets than later parts */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	/* IPG receive timers differ on the oldest (82542) silicon */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	/* absolute delay (TADV) only exists on 82540 and newer */
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* report-packet-sent on old parts, report-status on newer ones */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);

}
1692
1693/**
1694 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1695 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001696 * @rxdr: rx descriptor ring (for a specific queue) to setup
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 *
1698 * Returns 0 on success, negative on failure
1699 **/
1700
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	/* per-descriptor software bookkeeping, zeroed */
	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		/* shared failure exit: also reached from the retry below */
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous: holding on to the
		 * misaligned buffer forces the allocator to hand back a
		 * different (hopefully aligned) region */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up: free both attempts and bail out */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	/* descriptors must start out zeroed for the hardware */
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
1774
1775/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001776 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1777 * (Descriptors) for all queues
1778 * @adapter: board private structure
1779 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001780 * Return 0 on success, negative on failure
1781 **/
1782
Joe Perches64798842008-07-11 15:17:02 -07001783int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001784{
1785 int i, err = 0;
1786
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001787 for (i = 0; i < adapter->num_rx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001788 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1789 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001790 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001791 for (i-- ; i >= 0; i--)
1792 e1000_free_rx_resources(adapter,
1793 &adapter->rx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001794 break;
1795 }
1796 }
1797
1798 return err;
1799}
1800
1801/**
Malli Chilakala26483452005-04-28 19:44:46 -07001802 * e1000_setup_rctl - configure the receive control registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 * @adapter: Board private structure
1804 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before re-programming it */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI compatibility requires accepting bad packets (SBP) */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long-packet enable only for jumbo MTUs */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		/* 2048 is encoded without the buffer-size-extension bit */
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by the normal PROMISC logic
		 * in the driver's set_rx_mode handler */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1866
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	/* Pick the receive clean/alloc handlers based on MTU: jumbo frames
	 * use the page-based path, standard frames the skb-based path.
	 * The descriptor ring length in bytes is the same either way. */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
		        sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
		        sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* 82540 and later also have an absolute delay and an interrupt
	 * throttle register (ITR, programmed in 256ns units) */
	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		/* 82542 uses different head/tail register offsets than
		 * 82543 and later; remember which ones apply */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
1935
1936/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001937 * e1000_free_tx_resources - Free Tx Resources per Queue
1938 * @adapter: board private structure
1939 * @tx_ring: Tx descriptor ring for a specific queue
1940 *
1941 * Free all transmit software resources
1942 **/
1943
Joe Perches64798842008-07-11 15:17:02 -07001944static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1945 struct e1000_tx_ring *tx_ring)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001946{
1947 struct pci_dev *pdev = adapter->pdev;
1948
1949 e1000_clean_tx_ring(adapter, tx_ring);
1950
1951 vfree(tx_ring->buffer_info);
1952 tx_ring->buffer_info = NULL;
1953
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001954 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1955 tx_ring->dma);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001956
1957 tx_ring->desc = NULL;
1958}
1959
1960/**
1961 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 * @adapter: board private structure
1963 *
1964 * Free all transmit software resources
1965 **/
1966
Joe Perches64798842008-07-11 15:17:02 -07001967void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968{
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001969 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001971 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001972 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973}
1974
Joe Perches64798842008-07-11 15:17:02 -07001975static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1976 struct e1000_buffer *buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977{
Alexander Duyck602c0552009-12-02 16:46:00 +00001978 if (buffer_info->dma) {
1979 if (buffer_info->mapped_as_page)
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001980 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1981 buffer_info->length, DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001982 else
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001983 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
Alexander Duyck602c0552009-12-02 16:46:00 +00001984 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001985 DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001986 buffer_info->dma = 0;
1987 }
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001988 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 dev_kfree_skb_any(buffer_info->skb);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001990 buffer_info->skb = NULL;
1991 }
Alexander Duyck37e73df2009-03-25 21:58:45 +00001992 buffer_info->time_stamp = 0;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001993 /* buffer_info must be completely set up in the transmit path */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994}
1995
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 *
 * Unmaps and frees every buffer still held by @tx_ring, zeroes the
 * software buffer_info array and the hardware descriptor ring, and
 * resets the ring indices both in software and in the device's
 * head/tail registers.
 **/

static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	/* drop byte-queue-limit accounting for the now-empty queue */
	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	/* reset the hardware's view of the ring head and tail */
	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
2032
2033/**
2034 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2035 * @adapter: board private structure
2036 **/
2037
Joe Perches64798842008-07-11 15:17:02 -07002038static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002039{
2040 int i;
2041
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002042 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002043 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044}
2045
2046/**
2047 * e1000_free_rx_resources - Free Rx Resources
2048 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002049 * @rx_ring: ring to clean the resources from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 *
2051 * Free all receive software resources
2052 **/
2053
Joe Perches64798842008-07-11 15:17:02 -07002054static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2055 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 struct pci_dev *pdev = adapter->pdev;
2058
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002059 e1000_clean_rx_ring(adapter, rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
2061 vfree(rx_ring->buffer_info);
2062 rx_ring->buffer_info = NULL;
2063
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002064 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2065 rx_ring->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
2067 rx_ring->desc = NULL;
2068}
2069
2070/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002071 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002073 *
2074 * Free all receive software resources
2075 **/
2076
Joe Perches64798842008-07-11 15:17:02 -07002077void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002078{
2079 int i;
2080
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002081 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002082 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2083}
2084
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 *
 * Unmaps DMA and releases every receive buffer (skb or page, depending
 * on which receive path is active), drops any partially assembled
 * chained receive, zeroes the software and hardware descriptor state,
 * and clears the device's head/tail ring pointers.
 **/

static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		/* the mapping type mirrors the active clean_rx handler:
		 * single mappings on the normal path, page mappings on
		 * the jumbo path */
		if (buffer_info->dma &&
		    adapter->clean_rx == e1000_clean_rx_irq) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
		} else if (buffer_info->dma &&
		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			dma_unmap_page(&pdev->dev, buffer_info->dma,
				       buffer_info->length,
				       DMA_FROM_DEVICE);
		}

		buffer_info->dma = 0;
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* reset the hardware's view of the ring head and tail */
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
2144
2145/**
2146 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2147 * @adapter: board private structure
2148 **/
2149
Joe Perches64798842008-07-11 15:17:02 -07002150static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002151{
2152 int i;
2153
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002154 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002155 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156}
2157
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* disable PCI memory-write-and-invalidate while in reset */
	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	/* flush the write, then give the receive unit time to enter reset */
	E1000_WRITE_FLUSH();
	mdelay(5);

	/* receive state is lost in reset; drop any buffered frames */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2178
/* Take the 82542 2.0 receive unit back out of reset: clear RCTL.RST,
 * restore MWI if the PCI command word had it enabled, and, if the
 * interface is running, reprogram and refill the (single) receive ring.
 */
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	/* flush the write, then let the receive unit leave reset */
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2201
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* keep the netdev and hw copies of the address in sync */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	/* RAR slot 0 holds the station address */
	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
2234
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	/* shadow copy of the multicast hash table, written to hardware in
	 * one pass at the end; GFP_ATOMIC because this path may not sleep */
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	/* if there are more secondary unicast addresses than spare RAR
	 * slots, fall back to unicast promiscuous mode */
	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	/* clear any exact-filter slots left over from a previous, longer
	 * address list (each RAR is a low/high register pair) */
	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/*
		 * If we are on an 82544 has an errata where writing odd
		 * offsets overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
2347
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);
	/* do nothing if the interface is being brought down */
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	/* serialize against other adapter work items */
	mutex_lock(&adapter->mutex);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	mutex_unlock(&adapter->mutex);
}
2366
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 *
 * 82547 workaround: once a Tx FIFO stall has been flagged
 * (adapter->tx_fifo_stall), wait until the hardware descriptor ring and
 * FIFO head/tail pairs agree (everything has drained), then rewrite the
 * FIFO pointers and wake the netdev queue.  If the FIFO has not drained
 * yet, reschedule this task to check again shortly.
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* only safe to reset the FIFO once the descriptor ring and
		 * both FIFO head/tail pairs show everything has drained */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			/* disable transmits while rewriting FIFO pointers */
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet; retry on the next tick */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
	mutex_unlock(&adapter->mutex);
}
2405
/**
 * e1000_has_link - determine the current link state
 * @adapter: board private structure
 *
 * Returns true if link is up.  Copper PHYs are only re-polled while
 * hw->get_link_status is set (see comment below); fiber and serdes
 * media are checked directly each call.
 */
bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status will stay false until the
	 * e1000_check_for_link establishes link for copper adapters
	 * ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		/* ce4100 has no link interrupt; always force a re-check */
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		/* link state is reported in the STATUS register */
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}
2442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443/**
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002444 * e1000_watchdog - work function
2445 * @work: work struct contained inside adapter struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 **/
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002447static void e1000_watchdog(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448{
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002449 struct e1000_adapter *adapter = container_of(work,
2450 struct e1000_adapter,
2451 watchdog_task.work);
Joe Perches1dc32912008-07-11 15:17:08 -07002452 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 struct net_device *netdev = adapter->netdev;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002454 struct e1000_tx_ring *txdr = adapter->tx_ring;
Joe Perches406874a2008-04-03 10:06:32 -07002455 u32 link, tctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002457 if (test_bit(__E1000_DOWN, &adapter->flags))
2458 return;
2459
2460 mutex_lock(&adapter->mutex);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002461 link = e1000_has_link(adapter);
2462 if ((netif_carrier_ok(netdev)) && link)
2463 goto link_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002465 if (link) {
2466 if (!netif_carrier_ok(netdev)) {
Joe Perches406874a2008-04-03 10:06:32 -07002467 u32 ctrl;
Joe Perchesc3033b02008-03-21 11:06:25 -07002468 bool txb2b = true;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002469 /* update snapshot of PHY registers on LSC */
Joe Perches1dc32912008-07-11 15:17:08 -07002470 e1000_get_speed_and_duplex(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471 &adapter->link_speed,
2472 &adapter->link_duplex);
2473
Joe Perches1dc32912008-07-11 15:17:08 -07002474 ctrl = er32(CTRL);
Emil Tantilov675ad472010-04-27 14:02:58 +00002475 pr_info("%s NIC Link is Up %d Mbps %s, "
2476 "Flow Control: %s\n",
2477 netdev->name,
2478 adapter->link_speed,
2479 adapter->link_duplex == FULL_DUPLEX ?
2480 "Full Duplex" : "Half Duplex",
2481 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2482 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2483 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2484 E1000_CTRL_TFCE) ? "TX" : "None")));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485
Emil Tantilov39ca5f02010-03-26 11:25:58 +00002486 /* adjust timeout factor according to speed/duplex */
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002487 adapter->tx_timeout_factor = 1;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002488 switch (adapter->link_speed) {
2489 case SPEED_10:
Joe Perchesc3033b02008-03-21 11:06:25 -07002490 txb2b = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002491 adapter->tx_timeout_factor = 16;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002492 break;
2493 case SPEED_100:
Joe Perchesc3033b02008-03-21 11:06:25 -07002494 txb2b = false;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002495 /* maybe add some timeout factor ? */
2496 break;
2497 }
2498
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002499 /* enable transmits in the hardware */
Joe Perches1dc32912008-07-11 15:17:08 -07002500 tctl = er32(TCTL);
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002501 tctl |= E1000_TCTL_EN;
Joe Perches1dc32912008-07-11 15:17:08 -07002502 ew32(TCTL, tctl);
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002503
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 netif_carrier_on(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002505 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002506 schedule_delayed_work(&adapter->phy_info_task,
2507 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 adapter->smartspeed = 0;
2509 }
2510 } else {
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002511 if (netif_carrier_ok(netdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 adapter->link_speed = 0;
2513 adapter->link_duplex = 0;
Emil Tantilov675ad472010-04-27 14:02:58 +00002514 pr_info("%s NIC Link is Down\n",
2515 netdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 netif_carrier_off(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002517
2518 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002519 schedule_delayed_work(&adapter->phy_info_task,
2520 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 }
2522
2523 e1000_smartspeed(adapter);
2524 }
2525
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002526link_up:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 e1000_update_stats(adapter);
2528
Joe Perches1dc32912008-07-11 15:17:08 -07002529 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 adapter->tpt_old = adapter->stats.tpt;
Joe Perches1dc32912008-07-11 15:17:08 -07002531 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 adapter->colc_old = adapter->stats.colc;
2533
2534 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2535 adapter->gorcl_old = adapter->stats.gorcl;
2536 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2537 adapter->gotcl_old = adapter->stats.gotcl;
2538
Joe Perches1dc32912008-07-11 15:17:08 -07002539 e1000_update_adaptive(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002541 if (!netif_carrier_ok(netdev)) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002542 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 /* We've lost link, so the controller stops DMA,
2544 * but we've got queued Tx work that's never going
2545 * to get done, so reset controller to flush Tx.
2546 * (Do the reset outside of interrupt context). */
Jeff Kirsher87041632006-03-02 18:21:24 -08002547 adapter->tx_timeout_count++;
2548 schedule_work(&adapter->reset_task);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002549 /* exit immediately since reset is imminent */
2550 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 }
2552 }
2553
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002554 /* Simple mode for Interrupt Throttle Rate (ITR) */
2555 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2556 /*
2557 * Symmetric Tx/Rx gets a reduced ITR=2000;
2558 * Total asymmetrical Tx or Rx gets ITR=8000;
2559 * everyone else is between 2000-8000.
2560 */
2561 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2562 u32 dif = (adapter->gotcl > adapter->gorcl ?
2563 adapter->gotcl - adapter->gorcl :
2564 adapter->gorcl - adapter->gotcl) / 10000;
2565 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2566
2567 ew32(ITR, 1000000000 / (itr * 256));
2568 }
2569
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 /* Cause software interrupt to ensure rx ring is cleaned */
Joe Perches1dc32912008-07-11 15:17:08 -07002571 ew32(ICS, E1000_ICS_RXDMT0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572
Malli Chilakala26483452005-04-28 19:44:46 -07002573 /* Force detection of hung controller every watchdog period */
Joe Perchesc3033b02008-03-21 11:06:25 -07002574 adapter->detect_tx_hung = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002576 /* Reschedule the task */
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002577 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002578 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002579
2580unlock:
2581 mutex_unlock(&adapter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582}
2583
/* Latency classes used by the dynamic ITR (Interrupt Throttle Rate) logic.
 * e1000_update_itr() classifies recent traffic into one of these ranges and
 * e1000_set_itr() maps the range to a target interrupt rate.
 */
enum latency_range {
	lowest_latency = 0,	/* small/interactive traffic -> highest int rate */
	low_latency = 1,	/* mixed traffic, ~20000 ints/s */
	bulk_latency = 2,	/* large transfers, ~4000 ints/s */
	latency_invalid = 255	/* sentinel: no valid measurement */
};
2590
2591/**
2592 * e1000_update_itr - update the dynamic ITR value based on statistics
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00002593 * @adapter: pointer to adapter
2594 * @itr_setting: current adapter->itr
2595 * @packets: the number of packets during this measurement interval
2596 * @bytes: the number of bytes during this measurement interval
2597 *
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002598 * Stores a new ITR value based on packets and byte
2599 * counts during the last interrupt. The advantage of per interrupt
2600 * computation is faster updates and more accurate ITR for the current
2601 * traffic pattern. Constants in this function were computed
2602 * based on theoretical maximum wire speed and thresholds were set based
2603 * on testing data as well as attempting to minimize response time
2604 * while increasing bulk throughput.
2605 * this functionality is controlled by the InterruptThrottleRate module
2606 * parameter (see e1000_param.c)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002607 **/
2608static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
Joe Perches64798842008-07-11 15:17:02 -07002609 u16 itr_setting, int packets, int bytes)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002610{
2611 unsigned int retval = itr_setting;
2612 struct e1000_hw *hw = &adapter->hw;
2613
2614 if (unlikely(hw->mac_type < e1000_82540))
2615 goto update_itr_done;
2616
2617 if (packets == 0)
2618 goto update_itr_done;
2619
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002620 switch (itr_setting) {
2621 case lowest_latency:
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002622 /* jumbo frames get bulk treatment*/
2623 if (bytes/packets > 8000)
2624 retval = bulk_latency;
2625 else if ((packets < 5) && (bytes > 512))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002626 retval = low_latency;
2627 break;
2628 case low_latency: /* 50 usec aka 20000 ints/s */
2629 if (bytes > 10000) {
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002630 /* jumbo frames need bulk latency setting */
2631 if (bytes/packets > 8000)
2632 retval = bulk_latency;
2633 else if ((packets < 10) || ((bytes/packets) > 1200))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002634 retval = bulk_latency;
2635 else if ((packets > 35))
2636 retval = lowest_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002637 } else if (bytes/packets > 2000)
2638 retval = bulk_latency;
2639 else if (packets <= 2 && bytes < 512)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002640 retval = lowest_latency;
2641 break;
2642 case bulk_latency: /* 250 usec aka 4000 ints/s */
2643 if (bytes > 25000) {
2644 if (packets > 35)
2645 retval = low_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002646 } else if (bytes < 6000) {
2647 retval = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002648 }
2649 break;
2650 }
2651
2652update_itr_done:
2653 return retval;
2654}
2655
2656static void e1000_set_itr(struct e1000_adapter *adapter)
2657{
2658 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07002659 u16 current_itr;
2660 u32 new_itr = adapter->itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002661
2662 if (unlikely(hw->mac_type < e1000_82540))
2663 return;
2664
2665 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2666 if (unlikely(adapter->link_speed != SPEED_1000)) {
2667 current_itr = 0;
2668 new_itr = 4000;
2669 goto set_itr_now;
2670 }
2671
2672 adapter->tx_itr = e1000_update_itr(adapter,
2673 adapter->tx_itr,
2674 adapter->total_tx_packets,
2675 adapter->total_tx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002676 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2677 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2678 adapter->tx_itr = low_latency;
2679
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002680 adapter->rx_itr = e1000_update_itr(adapter,
2681 adapter->rx_itr,
2682 adapter->total_rx_packets,
2683 adapter->total_rx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002684 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2685 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2686 adapter->rx_itr = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002687
2688 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2689
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002690 switch (current_itr) {
2691 /* counts and packets in update_itr are dependent on these numbers */
2692 case lowest_latency:
2693 new_itr = 70000;
2694 break;
2695 case low_latency:
2696 new_itr = 20000; /* aka hwitr = ~200 */
2697 break;
2698 case bulk_latency:
2699 new_itr = 4000;
2700 break;
2701 default:
2702 break;
2703 }
2704
2705set_itr_now:
2706 if (new_itr != adapter->itr) {
2707 /* this attempts to bias the interrupt rate towards Bulk
2708 * by adding intermediate steps when interrupt rate is
2709 * increasing */
2710 new_itr = new_itr > adapter->itr ?
2711 min(adapter->itr + (new_itr >> 2), new_itr) :
2712 new_itr;
2713 adapter->itr = new_itr;
Joe Perches1dc32912008-07-11 15:17:08 -07002714 ew32(ITR, 1000000000 / (new_itr * 256));
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002715 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002716}
2717
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718#define E1000_TX_FLAGS_CSUM 0x00000001
2719#define E1000_TX_FLAGS_VLAN 0x00000002
2720#define E1000_TX_FLAGS_TSO 0x00000004
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002721#define E1000_TX_FLAGS_IPV4 0x00000008
Ben Greear11a78dc2012-02-11 15:40:01 +00002722#define E1000_TX_FLAGS_NO_FCS 0x00000010
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2724#define E1000_TX_FLAGS_VLAN_SHIFT 16
2725
/**
 * e1000_tso - set up a TSO context descriptor for a GSO skb
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is placed on
 * @skb: packet to be segmented by the hardware
 *
 * For GSO packets, writes one context descriptor carrying the IP/TCP
 * header offsets, seeded pseudo-header checksum and MSS that the
 * hardware needs to segment the payload, then advances next_to_use.
 *
 * Returns a negative errno if un-cloning the header fails, true (1) if
 * a context descriptor was queued, false (0) for non-GSO packets.
 **/
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		/* the headers are modified in place below, so the skb data
		 * must not be shared with a clone */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			/* zero the fields the hardware fills in per segment,
			 * and seed the TCP checksum with the pseudo-header */
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			/* ipcse == 0 means "no IP checksum end offset" for v6 */
			ipcse = 0;
		}
		/* byte offsets of the IP/TCP checksum fields within the
		 * frame, as required by the context descriptor format */
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		/* payload length (total minus headers) rides in the low bits
		 * of cmd_and_length */
		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		/* wrap the ring index */
		if (++i == tx_ring->count) i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
2797
/**
 * e1000_tx_csum - set up a checksum-offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is placed on
 * @skb: packet that needs its checksum completed by hardware
 *
 * For CHECKSUM_PARTIAL skbs, queues a context descriptor telling the
 * hardware where the checksum computation starts (tucss) and where the
 * result must be inserted (tucso), then advances next_to_use.
 *
 * Returns true if a context descriptor was queued, false if the skb
 * needs no checksum offload.
 **/
static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	/* only TCP gets the TCP command bit; other protocols still get a
	 * plain checksum insertion descriptor */
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	/* wrap the ring index */
	if (unlikely(++i == tx_ring->count)) i = 0;
	tx_ring->next_to_use = i;

	return true;
}
2849
2850#define E1000_MAX_TXD_PWR 12
2851#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2852
/**
 * e1000_tx_map - map skb data for DMA and fill the ring's buffer_info
 * @adapter: board private structure
 * @tx_ring: ring the buffers are placed on
 * @skb: packet whose linear data and fragments are to be mapped
 * @first: index of the packet's first descriptor (for next_to_watch)
 * @max_per_txd: maximum bytes a single descriptor may carry
 * @nr_frags: number of paged fragments in @skb
 * @mss: TSO segment size, 0 if not a TSO packet
 *
 * Maps the linear head and every paged fragment, applying several
 * hardware-erratum size workarounds along the way, and records each
 * mapping in buffer_info[].  Returns the number of buffers used, or 0
 * if a DMA mapping failed (all mappings made so far are unwound).
 **/
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* map the linear portion of the skb, possibly split over several
	 * descriptors of at most max_per_txd bytes each */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		/* only advance the index if more data follows, so that on
		 * exit 'i' refers to the last descriptor actually used */
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	/* map each paged fragment, same per-descriptor splitting rules */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* the skb and byte accounting live on the last descriptor; the
	 * first descriptor's next_to_watch points there for tx cleanup */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	/* walk the ring backwards and unmap everything mapped so far */
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i==0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
2994
/**
 * e1000_tx_queue - write the tx descriptors and notify the hardware
 * @adapter: board private structure
 * @tx_ring: ring the descriptors are placed on
 * @tx_flags: E1000_TX_FLAGS_* bits describing offloads for this packet
 * @count: number of buffer_info entries (from e1000_tx_map) to turn
 *         into descriptors
 *
 * Translates @tx_flags into descriptor command/option bits, fills
 * @count descriptors from the previously mapped buffer_info entries,
 * then advances the tail register so the hardware fetches them.
 **/
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	/* every descriptor of the packet gets the same upper/lower flag
	 * bits plus its own buffer address and length */
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	/* the last descriptor additionally carries the per-adapter command
	 * bits (e.g. end-of-packet / report-status) */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it syncronizes IO on IA64/Altix systems */
	mmiowb();
}
3057
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003058/* 82547 workaround to avoid controller hang in half-duplex environment.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 * The workaround is to avoid queuing a large packet that would span
3060 * the internal Tx FIFO ring boundary by notifying the stack to resend
3061 * the packet at a later time. This gives the Tx FIFO an opportunity to
3062 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3063 * to the beginning of the Tx FIFO.
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003064 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065
3066#define E1000_FIFO_HDR 0x10
3067#define E1000_82547_PAD_LEN 0x3E0
3068
Joe Perches64798842008-07-11 15:17:02 -07003069static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3070 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071{
Joe Perches406874a2008-04-03 10:06:32 -07003072 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3073 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07003075 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003077 if (adapter->link_duplex != HALF_DUPLEX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 goto no_fifo_stall_required;
3079
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003080 if (atomic_read(&adapter->tx_fifo_stall))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 return 1;
3082
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003083 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 atomic_set(&adapter->tx_fifo_stall, 1);
3085 return 1;
3086 }
3087
3088no_fifo_stall_required:
3089 adapter->tx_fifo_head += skb_fifo_len;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003090 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3092 return 0;
3093}
3094
/**
 * __e1000_maybe_stop_tx - stop the tx queue unless room just appeared
 * @netdev: network interface device structure
 * @size: number of unused descriptors required
 *
 * Slow path of e1000_maybe_stop_tx().  Stops the queue first, then
 * re-checks the ring with a full memory barrier in between so that a
 * concurrent tx-clean completing on another CPU cannot be missed.
 * Returns -EBUSY if the queue stays stopped, 0 if room became
 * available and the queue was restarted.
 **/
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}
3116
/* Stop the tx queue if fewer than @size descriptors are free.
 * Returns 0 when transmission may proceed, -EBUSY otherwise.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	/* take the slow path only when the ring is nearly full */
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);
	return 0;
}
3124
/* Descriptors needed for a buffer of length S when the per-descriptor
 * limit is 2^X bytes (1-based ceiling). */
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )

/**
 * e1000_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Estimates the descriptor budget for @skb (including several
 * controller-errata pads), applies per-MAC workarounds, sets up
 * TSO/checksum/VLAN offload flags, maps the buffers and hands the
 * descriptors to the hardware.
 *
 * Returns NETDEV_TX_OK (consuming the skb) or NETDEV_TX_BUSY when the
 * ring is too full and the stack should requeue.
 **/
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;	/* running estimate of descriptors required */
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	tx_ring = adapter->tx_ring;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To WA this issue, pad all small packets manually.
	 */
	if (skb->len < ETH_ZLEN) {
		if (skb_pad(skb, ETH_ZLEN - skb->len))
			return NETDEV_TX_OK;	/* skb already freed by skb_pad */
		skb->len = ETH_ZLEN;
		skb_set_tail_pointer(skb, ETH_ZLEN);
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
				unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword */
				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
					break;
				/* fall through */
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	/* 82544 in PCI-X needs an extra descriptor per buffer */
	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			(len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	/* 82547 shares the Tx FIFO with the link partner wakeup logic;
	 * if this skb could wrap the FIFO, stall and let the delayed
	 * work drain it first. */
	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	/* remember the ring position so we can unwind on mapping failure */
	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		netdev_sent_queue(netdev, skb->len);
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);

	} else {
		/* DMA mapping failed: drop the skb and rewind the ring */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
3292
Tushar Daveb04e36b2012-01-27 09:00:46 +00003293#define NUM_REGS 38 /* 1 based count */
3294static void e1000_regdump(struct e1000_adapter *adapter)
3295{
3296 struct e1000_hw *hw = &adapter->hw;
3297 u32 regs[NUM_REGS];
3298 u32 *regs_buff = regs;
3299 int i = 0;
3300
Tushar Davee29b5d82012-02-10 08:06:36 +00003301 static const char * const reg_name[] = {
3302 "CTRL", "STATUS",
3303 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3304 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3305 "TIDV", "TXDCTL", "TADV", "TARC0",
3306 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3307 "TXDCTL1", "TARC1",
3308 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3309 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3310 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
Tushar Daveb04e36b2012-01-27 09:00:46 +00003311 };
3312
3313 regs_buff[0] = er32(CTRL);
3314 regs_buff[1] = er32(STATUS);
3315
3316 regs_buff[2] = er32(RCTL);
3317 regs_buff[3] = er32(RDLEN);
3318 regs_buff[4] = er32(RDH);
3319 regs_buff[5] = er32(RDT);
3320 regs_buff[6] = er32(RDTR);
3321
3322 regs_buff[7] = er32(TCTL);
3323 regs_buff[8] = er32(TDBAL);
3324 regs_buff[9] = er32(TDBAH);
3325 regs_buff[10] = er32(TDLEN);
3326 regs_buff[11] = er32(TDH);
3327 regs_buff[12] = er32(TDT);
3328 regs_buff[13] = er32(TIDV);
3329 regs_buff[14] = er32(TXDCTL);
3330 regs_buff[15] = er32(TADV);
3331 regs_buff[16] = er32(TARC0);
3332
3333 regs_buff[17] = er32(TDBAL1);
3334 regs_buff[18] = er32(TDBAH1);
3335 regs_buff[19] = er32(TDLEN1);
3336 regs_buff[20] = er32(TDH1);
3337 regs_buff[21] = er32(TDT1);
3338 regs_buff[22] = er32(TXDCTL1);
3339 regs_buff[23] = er32(TARC1);
3340 regs_buff[24] = er32(CTRL_EXT);
3341 regs_buff[25] = er32(ERT);
3342 regs_buff[26] = er32(RDBAL0);
3343 regs_buff[27] = er32(RDBAH0);
3344 regs_buff[28] = er32(TDFH);
3345 regs_buff[29] = er32(TDFT);
3346 regs_buff[30] = er32(TDFHS);
3347 regs_buff[31] = er32(TDFTS);
3348 regs_buff[32] = er32(TDFPC);
3349 regs_buff[33] = er32(RDFH);
3350 regs_buff[34] = er32(RDFT);
3351 regs_buff[35] = er32(RDFHS);
3352 regs_buff[36] = er32(RDFTS);
3353 regs_buff[37] = er32(RDFPC);
3354
3355 pr_info("Register dump\n");
Tushar Davee29b5d82012-02-10 08:06:36 +00003356 for (i = 0; i < NUM_REGS; i++)
3357 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003358}
3359
/*
 * e1000_dump: Print registers, tx ring and rx ring
 *
 * Debug aid called on Tx hang detection; output is gated by the
 * adapter's msg_enable (netif_msg_hw / _tx_done / _rx_status) bits.
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/*
	 * transmit dump
	 */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
		/* overlay to print the raw descriptor as two 64-bit words */
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		/* annotate head/tail positions of the ring */
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/*
	 * receive dump
	 */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
		/* same raw two-word overlay as on the Tx side */
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->skb, type);
	} /* for */

	/* dump the descriptor caches */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
3501
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502/**
3503 * e1000_tx_timeout - Respond to a Tx Hang
3504 * @netdev: network interface device structure
3505 **/
3506
Joe Perches64798842008-07-11 15:17:02 -07003507static void e1000_tx_timeout(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003509 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510
3511 /* Do the reset outside of interrupt context */
Jeff Kirsher87041632006-03-02 18:21:24 -08003512 adapter->tx_timeout_count++;
3513 schedule_work(&adapter->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514}
3515
Joe Perches64798842008-07-11 15:17:02 -07003516static void e1000_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517{
David Howells65f27f32006-11-22 14:55:48 +00003518 struct e1000_adapter *adapter =
3519 container_of(work, struct e1000_adapter, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00003521 if (test_bit(__E1000_DOWN, &adapter->flags))
3522 return;
Tushar Daveb04e36b2012-01-27 09:00:46 +00003523 e_err(drv, "Reset adapter\n");
Jesse Brandeburg338c15e2010-09-22 18:22:42 +00003524 e1000_reinit_safe(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525}
3526
3527/**
3528 * e1000_get_stats - Get System Network Statistics
3529 * @netdev: network interface device structure
3530 *
3531 * Returns the address of the device statistics structure.
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003532 * The statistics are actually updated from the watchdog.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 **/
3534
Joe Perches64798842008-07-11 15:17:02 -07003535static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536{
Jeff Kirsher6b7660c2006-01-12 16:50:35 -08003537 /* only return the current stats */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003538 return &netdev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539}
3540
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Validates the new size against per-MAC limits, takes the adapter
 * through a down/up cycle (when running) to resize Rx buffers, and
 * serializes against concurrent resets via the __E1000_RESETTING bit.
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* on-wire frame size = MTU + Ethernet header + FCS */
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		/* oldest parts cannot do jumbo frames at all */
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* claim the reset lock; spin (sleeping) until we own it */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
3616
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 *
 * Reads the hardware statistics registers (which clear on read for
 * most counters) and folds them into the driver's accumulators and
 * the netdev statistics structure.  Skipped while the link is down
 * or the PCI channel is offline.
 **/

void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	/* Rx size-bucket counters */
	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	/* Tx size-bucket counters */
	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	/* counters only present on 82543 and newer MACs */
	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	/* some parts report bogus carrier errors at full duplex */
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		   (hw->phy_type == e1000_phy_m88) &&
		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
Jesse Brandeburg9ac98282006-11-01 08:48:10 -08003771
3772/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773 * e1000_intr - Interrupt Handler
3774 * @irq: interrupt number
3775 * @data: pointer to a network interface device structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776 **/
3777
Joe Perches64798842008-07-11 15:17:02 -07003778static irqreturn_t e1000_intr(int irq, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779{
3780 struct net_device *netdev = data;
Malli Chilakala60490fe2005-06-17 17:41:45 -07003781 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782 struct e1000_hw *hw = &adapter->hw;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003783 u32 icr = er32(ICR);
Francois Romieuc3570ac2008-07-11 15:17:38 -07003784
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003785 if (unlikely((!icr)))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003786 return IRQ_NONE; /* Not our interrupt */
3787
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003788 /*
3789 * we might have caused the interrupt, but the above
3790 * read cleared it, and just in case the driver is
3791 * down there is nothing to do so return handled
3792 */
3793 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3794 return IRQ_HANDLED;
3795
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003796 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 hw->get_link_status = 1;
Auke Kok1314bbf2006-09-27 12:54:02 -07003798 /* guard against interrupt when we're going down */
3799 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003800 schedule_delayed_work(&adapter->watchdog_task, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801 }
3802
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003803 /* disable interrupts, without the synchronize_irq bit */
3804 ew32(IMC, ~0);
3805 E1000_WRITE_FLUSH();
3806
Ben Hutchings288379f2009-01-19 16:43:59 -08003807 if (likely(napi_schedule_prep(&adapter->napi))) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003808 adapter->total_tx_bytes = 0;
3809 adapter->total_tx_packets = 0;
3810 adapter->total_rx_bytes = 0;
3811 adapter->total_rx_packets = 0;
Ben Hutchings288379f2009-01-19 16:43:59 -08003812 __napi_schedule(&adapter->napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003813 } else {
Auke Kok90fb5132006-11-01 08:47:30 -08003814 /* this really should not happen! if it does it is basically a
3815 * bug, but not a hard error, so enable ints and continue */
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003816 if (!test_bit(__E1000_DOWN, &adapter->flags))
3817 e1000_irq_enable(adapter);
3818 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 return IRQ_HANDLED;
3821}
3822
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823/**
3824 * e1000_clean - NAPI Rx polling callback
3825 * @adapter: board private structure
3826 **/
Joe Perches64798842008-07-11 15:17:02 -07003827static int e1000_clean(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003829 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003830 int tx_clean_complete = 0, work_done = 0;
Malli Chilakala26483452005-04-28 19:44:46 -07003831
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003832 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003833
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003834 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003835
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003836 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003837 work_done = budget;
3838
David S. Miller53e52c72008-01-07 21:06:12 -08003839 /* If budget not fully consumed, exit the polling mode */
3840 if (work_done < budget) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003841 if (likely(adapter->itr_setting & 3))
3842 e1000_set_itr(adapter);
Ben Hutchings288379f2009-01-19 16:43:59 -08003843 napi_complete(napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003844 if (!test_bit(__E1000_DOWN, &adapter->flags))
3845 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 }
3847
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003848 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849}
3850
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851/**
3852 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3853 * @adapter: board private structure
3854 **/
Joe Perches64798842008-07-11 15:17:02 -07003855static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3856 struct e1000_tx_ring *tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857{
Joe Perches1dc32912008-07-11 15:17:08 -07003858 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 struct net_device *netdev = adapter->netdev;
3860 struct e1000_tx_desc *tx_desc, *eop_desc;
3861 struct e1000_buffer *buffer_info;
3862 unsigned int i, eop;
Jeff Kirsher2a1af5d2006-03-02 18:20:43 -08003863 unsigned int count = 0;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003864 unsigned int total_tx_bytes=0, total_tx_packets=0;
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003865 unsigned int bytes_compl = 0, pkts_compl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866
3867 i = tx_ring->next_to_clean;
3868 eop = tx_ring->buffer_info[i].next_to_watch;
3869 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870
Alexander Duyckccfb3422009-03-25 21:59:04 +00003871 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3872 (count < tx_ring->count)) {
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003873 bool cleaned = false;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00003874 rmb(); /* read buffer_info after eop_desc */
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003875 for ( ; !cleaned; count++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876 tx_desc = E1000_TX_DESC(*tx_ring, i);
3877 buffer_info = &tx_ring->buffer_info[i];
3878 cleaned = (i == eop);
3879
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003880 if (cleaned) {
Dean Nelson31c15a22011-08-25 14:39:24 +00003881 total_tx_packets += buffer_info->segs;
3882 total_tx_bytes += buffer_info->bytecount;
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003883 if (buffer_info->skb) {
3884 bytes_compl += buffer_info->skb->len;
3885 pkts_compl++;
3886 }
3887
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003888 }
Jeff Kirsherfd803242005-12-13 00:06:22 -05003889 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08003890 tx_desc->upper.data = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003892 if (unlikely(++i == tx_ring->count)) i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003894
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895 eop = tx_ring->buffer_info[i].next_to_watch;
3896 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3897 }
3898
3899 tx_ring->next_to_clean = i;
3900
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003901 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3902
Auke Kok77b2aad2006-04-14 19:05:25 -07003903#define TX_WAKE_THRESHOLD 32
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003904 if (unlikely(count && netif_carrier_ok(netdev) &&
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003905 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3906 /* Make sure that anybody stopping the queue after this
3907 * sees the new next_to_clean.
3908 */
3909 smp_mb();
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003910
3911 if (netif_queue_stopped(netdev) &&
3912 !(test_bit(__E1000_DOWN, &adapter->flags))) {
Auke Kok77b2aad2006-04-14 19:05:25 -07003913 netif_wake_queue(netdev);
Jesse Brandeburgfcfb1222006-11-01 08:47:59 -08003914 ++adapter->restart_queue;
3915 }
Auke Kok77b2aad2006-04-14 19:05:25 -07003916 }
Malli Chilakala26483452005-04-28 19:44:46 -07003917
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003918 if (adapter->detect_tx_hung) {
Malli Chilakala26483452005-04-28 19:44:46 -07003919 /* Detect a transmit hang in hardware, this serializes the
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 * check with the clearing of time_stamp and movement of i */
Joe Perchesc3033b02008-03-21 11:06:25 -07003921 adapter->detect_tx_hung = false;
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003922 if (tx_ring->buffer_info[eop].time_stamp &&
3923 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00003924 (adapter->tx_timeout_factor * HZ)) &&
3925 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003926
3927 /* detected Tx unit hang */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003928 e_err(drv, "Detected Tx Unit Hang\n"
Emil Tantilov675ad472010-04-27 14:02:58 +00003929 " Tx Queue <%lu>\n"
3930 " TDH <%x>\n"
3931 " TDT <%x>\n"
3932 " next_to_use <%x>\n"
3933 " next_to_clean <%x>\n"
3934 "buffer_info[next_to_clean]\n"
3935 " time_stamp <%lx>\n"
3936 " next_to_watch <%x>\n"
3937 " jiffies <%lx>\n"
3938 " next_to_watch.status <%x>\n",
Jeff Kirsher7bfa4812006-01-12 16:50:41 -08003939 (unsigned long)((tx_ring - adapter->tx_ring) /
3940 sizeof(struct e1000_tx_ring)),
Joe Perches1dc32912008-07-11 15:17:08 -07003941 readl(hw->hw_addr + tx_ring->tdh),
3942 readl(hw->hw_addr + tx_ring->tdt),
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003943 tx_ring->next_to_use,
Jeff Kirsher392137f2006-01-12 16:50:57 -08003944 tx_ring->next_to_clean,
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003945 tx_ring->buffer_info[eop].time_stamp,
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003946 eop,
3947 jiffies,
3948 eop_desc->upper.fields.status);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003949 e1000_dump(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950 netif_stop_queue(netdev);
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003951 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003953 adapter->total_tx_bytes += total_tx_bytes;
3954 adapter->total_tx_packets += total_tx_packets;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003955 netdev->stats.tx_bytes += total_tx_bytes;
3956 netdev->stats.tx_packets += total_tx_packets;
Eric Dumazet807540b2010-09-23 05:40:09 +00003957 return count < tx_ring->count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958}
3959
3960/**
3961 * e1000_rx_checksum - Receive Checksum Offload for 82543
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003962 * @adapter: board private structure
3963 * @status_err: receive descriptor status and error fields
3964 * @csum: receive descriptor csum field
3965 * @sk_buff: socket buffer with received data
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 **/
3967
Joe Perches64798842008-07-11 15:17:02 -07003968static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3969 u32 csum, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970{
Joe Perches1dc32912008-07-11 15:17:08 -07003971 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07003972 u16 status = (u16)status_err;
3973 u8 errors = (u8)(status_err >> 24);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003974
3975 skb_checksum_none_assert(skb);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003976
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 /* 82543 or newer only */
Joe Perches1dc32912008-07-11 15:17:08 -07003978 if (unlikely(hw->mac_type < e1000_82543)) return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 /* Ignore Checksum bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003980 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003981 /* TCP/UDP checksum error bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003982 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003983 /* let the stack verify checksum errors */
3984 adapter->hw_csum_err++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 return;
3986 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003987 /* TCP/UDP Checksum has not been calculated */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003988 if (!(status & E1000_RXD_STAT_TCPCS))
3989 return;
3990
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003991 /* It must be a TCP or UDP packet with a valid checksum */
3992 if (likely(status & E1000_RXD_STAT_TCPCS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 /* TCP checksum is good */
3994 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003996 adapter->hw_csum_good++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997}
3998
3999/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004000 * e1000_consume_page - helper function
4001 **/
4002static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
4003 u16 length)
4004{
4005 bi->page = NULL;
4006 skb->len += length;
4007 skb->data_len += length;
Eric Dumazeted64b3c2011-10-13 07:53:42 +00004008 skb->truesize += PAGE_SIZE;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004009}
4010
4011/**
4012 * e1000_receive_skb - helper function to handle rx indications
4013 * @adapter: board private structure
4014 * @status: descriptor status field as written by hardware
4015 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4016 * @skb: pointer to sk_buff to be indicated to stack
4017 */
4018static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4019 __le16 vlan, struct sk_buff *skb)
4020{
Jesse Brandeburg6a08d192010-09-22 18:23:05 +00004021 skb->protocol = eth_type_trans(skb, adapter->netdev);
4022
Jiri Pirko5622e402011-07-21 03:26:31 +00004023 if (status & E1000_RXD_STAT_VP) {
4024 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4025
4026 __vlan_hwaccel_put_tag(skb, vid);
4027 }
4028 napi_gro_receive(&adapter->napi, skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004029}
4030
4031/**
4032 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4033 * @adapter: board private structure
4034 * @rx_ring: ring to clean
4035 * @work_done: amount of napi work completed this call
4036 * @work_to_do: max amount of work allowed for this call to do
4037 *
4038 * the return value indicates whether actual cleaning was done, there
4039 * is no guarantee that everything was cleaned
4040 */
4041static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4042 struct e1000_rx_ring *rx_ring,
4043 int *work_done, int work_to_do)
4044{
4045 struct e1000_hw *hw = &adapter->hw;
4046 struct net_device *netdev = adapter->netdev;
4047 struct pci_dev *pdev = adapter->pdev;
4048 struct e1000_rx_desc *rx_desc, *next_rxd;
4049 struct e1000_buffer *buffer_info, *next_buffer;
4050 unsigned long irq_flags;
4051 u32 length;
4052 unsigned int i;
4053 int cleaned_count = 0;
4054 bool cleaned = false;
4055 unsigned int total_rx_bytes=0, total_rx_packets=0;
4056
4057 i = rx_ring->next_to_clean;
4058 rx_desc = E1000_RX_DESC(*rx_ring, i);
4059 buffer_info = &rx_ring->buffer_info[i];
4060
4061 while (rx_desc->status & E1000_RXD_STAT_DD) {
4062 struct sk_buff *skb;
4063 u8 status;
4064
4065 if (*work_done >= work_to_do)
4066 break;
4067 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00004068 rmb(); /* read descriptor and rx_buffer_info after status DD */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004069
4070 status = rx_desc->status;
4071 skb = buffer_info->skb;
4072 buffer_info->skb = NULL;
4073
4074 if (++i == rx_ring->count) i = 0;
4075 next_rxd = E1000_RX_DESC(*rx_ring, i);
4076 prefetch(next_rxd);
4077
4078 next_buffer = &rx_ring->buffer_info[i];
4079
4080 cleaned = true;
4081 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004082 dma_unmap_page(&pdev->dev, buffer_info->dma,
4083 buffer_info->length, DMA_FROM_DEVICE);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004084 buffer_info->dma = 0;
4085
4086 length = le16_to_cpu(rx_desc->length);
4087
4088 /* errors is only valid for DD + EOP descriptors */
4089 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4090 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
Sebastian Andrzej Siewiora3060852012-05-11 16:30:46 +00004091 u8 *mapped;
4092 u8 last_byte;
4093
4094 mapped = page_address(buffer_info->page);
4095 last_byte = *(mapped + length - 1);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004096 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4097 last_byte)) {
4098 spin_lock_irqsave(&adapter->stats_lock,
4099 irq_flags);
4100 e1000_tbi_adjust_stats(hw, &adapter->stats,
Sebastian Andrzej Siewior281a8f22012-05-15 09:18:55 +00004101 length, mapped);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004102 spin_unlock_irqrestore(&adapter->stats_lock,
4103 irq_flags);
4104 length--;
4105 } else {
Ben Greeare825b732012-04-04 06:01:29 +00004106 if (netdev->features & NETIF_F_RXALL)
4107 goto process_skb;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004108 /* recycle both page and skb */
4109 buffer_info->skb = skb;
4110 /* an error means any chain goes out the window
4111 * too */
4112 if (rx_ring->rx_skb_top)
4113 dev_kfree_skb(rx_ring->rx_skb_top);
4114 rx_ring->rx_skb_top = NULL;
4115 goto next_desc;
4116 }
4117 }
4118
4119#define rxtop rx_ring->rx_skb_top
Ben Greeare825b732012-04-04 06:01:29 +00004120process_skb:
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004121 if (!(status & E1000_RXD_STAT_EOP)) {
4122 /* this descriptor is only the beginning (or middle) */
4123 if (!rxtop) {
4124 /* this is the beginning of a chain */
4125 rxtop = skb;
4126 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4127 0, length);
4128 } else {
4129 /* this is the middle of a chain */
4130 skb_fill_page_desc(rxtop,
4131 skb_shinfo(rxtop)->nr_frags,
4132 buffer_info->page, 0, length);
4133 /* re-use the skb, only consumed the page */
4134 buffer_info->skb = skb;
4135 }
4136 e1000_consume_page(buffer_info, rxtop, length);
4137 goto next_desc;
4138 } else {
4139 if (rxtop) {
4140 /* end of the chain */
4141 skb_fill_page_desc(rxtop,
4142 skb_shinfo(rxtop)->nr_frags,
4143 buffer_info->page, 0, length);
4144 /* re-use the current skb, we only consumed the
4145 * page */
4146 buffer_info->skb = skb;
4147 skb = rxtop;
4148 rxtop = NULL;
4149 e1000_consume_page(buffer_info, skb, length);
4150 } else {
4151 /* no chain, got EOP, this buf is the packet
4152 * copybreak to save the put_page/alloc_page */
4153 if (length <= copybreak &&
4154 skb_tailroom(skb) >= length) {
4155 u8 *vaddr;
Cong Wang46790262011-11-25 23:14:23 +08004156 vaddr = kmap_atomic(buffer_info->page);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004157 memcpy(skb_tail_pointer(skb), vaddr, length);
Cong Wang46790262011-11-25 23:14:23 +08004158 kunmap_atomic(vaddr);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004159 /* re-use the page, so don't erase
4160 * buffer_info->page */
4161 skb_put(skb, length);
4162 } else {
4163 skb_fill_page_desc(skb, 0,
4164 buffer_info->page, 0,
4165 length);
4166 e1000_consume_page(buffer_info, skb,
4167 length);
4168 }
4169 }
4170 }
4171
4172 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4173 e1000_rx_checksum(adapter,
4174 (u32)(status) |
4175 ((u32)(rx_desc->errors) << 24),
4176 le16_to_cpu(rx_desc->csum), skb);
4177
Ben Greearb0d15622012-02-11 15:40:11 +00004178 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4179 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4180 pskb_trim(skb, skb->len - 4);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004181 total_rx_packets++;
4182
4183 /* eth type trans needs skb->data to point to something */
4184 if (!pskb_may_pull(skb, ETH_HLEN)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004185 e_err(drv, "pskb_may_pull failed.\n");
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004186 dev_kfree_skb(skb);
4187 goto next_desc;
4188 }
4189
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004190 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4191
4192next_desc:
4193 rx_desc->status = 0;
4194
4195 /* return some buffers to hardware, one at a time is too slow */
4196 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4197 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4198 cleaned_count = 0;
4199 }
4200
4201 /* use prefetched values */
4202 rx_desc = next_rxd;
4203 buffer_info = next_buffer;
4204 }
4205 rx_ring->next_to_clean = i;
4206
4207 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4208 if (cleaned_count)
4209 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4210
4211 adapter->total_rx_packets += total_rx_packets;
4212 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004213 netdev->stats.rx_bytes += total_rx_bytes;
4214 netdev->stats.rx_packets += total_rx_packets;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004215 return cleaned;
4216}
4217
Joe Perches57bf6ee2010-05-13 15:26:17 +00004218/*
4219 * this should improve performance for small packets with large amounts
4220 * of reassembly being done in the stack
4221 */
4222static void e1000_check_copybreak(struct net_device *netdev,
4223 struct e1000_buffer *buffer_info,
4224 u32 length, struct sk_buff **skb)
4225{
4226 struct sk_buff *new_skb;
4227
4228 if (length > copybreak)
4229 return;
4230
4231 new_skb = netdev_alloc_skb_ip_align(netdev, length);
4232 if (!new_skb)
4233 return;
4234
4235 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4236 (*skb)->data - NET_IP_ALIGN,
4237 length + NET_IP_ALIGN);
4238 /* save the skb in buffer_info as good */
4239 buffer_info->skb = *skb;
4240 *skb = new_skb;
4241}
4242
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004243/**
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004244 * e1000_clean_rx_irq - Send received data up the network stack; legacy
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 * @adapter: board private structure
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004246 * @rx_ring: ring to clean
4247 * @work_done: amount of napi work completed this call
4248 * @work_to_do: max amount of work allowed for this call to do
4249 */
Joe Perches64798842008-07-11 15:17:02 -07004250static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4251 struct e1000_rx_ring *rx_ring,
4252 int *work_done, int work_to_do)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253{
Joe Perches1dc32912008-07-11 15:17:08 -07004254 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255 struct net_device *netdev = adapter->netdev;
4256 struct pci_dev *pdev = adapter->pdev;
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004257 struct e1000_rx_desc *rx_desc, *next_rxd;
4258 struct e1000_buffer *buffer_info, *next_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259 unsigned long flags;
Joe Perches406874a2008-04-03 10:06:32 -07004260 u32 length;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261 unsigned int i;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004262 int cleaned_count = 0;
Joe Perchesc3033b02008-03-21 11:06:25 -07004263 bool cleaned = false;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004264 unsigned int total_rx_bytes=0, total_rx_packets=0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265
4266 i = rx_ring->next_to_clean;
4267 rx_desc = E1000_RX_DESC(*rx_ring, i);
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004268 buffer_info = &rx_ring->buffer_info[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004270 while (rx_desc->status & E1000_RXD_STAT_DD) {
Auke Kok24f476e2006-06-08 09:28:47 -07004271 struct sk_buff *skb;
Jeff Kirshera292ca62006-01-12 16:51:30 -08004272 u8 status;
Auke Kok90fb5132006-11-01 08:47:30 -08004273
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004274 if (*work_done >= work_to_do)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 break;
4276 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00004277 rmb(); /* read descriptor and rx_buffer_info after status DD */
Francois Romieuc3570ac2008-07-11 15:17:38 -07004278
Jeff Kirshera292ca62006-01-12 16:51:30 -08004279 status = rx_desc->status;
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004280 skb = buffer_info->skb;
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004281 buffer_info->skb = NULL;
4282
Jeff Kirsher30320be2006-03-02 18:21:57 -08004283 prefetch(skb->data - NET_IP_ALIGN);
4284
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004285 if (++i == rx_ring->count) i = 0;
4286 next_rxd = E1000_RX_DESC(*rx_ring, i);
Jeff Kirsher30320be2006-03-02 18:21:57 -08004287 prefetch(next_rxd);
4288
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004289 next_buffer = &rx_ring->buffer_info[i];
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004290
Joe Perchesc3033b02008-03-21 11:06:25 -07004291 cleaned = true;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004292 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004293 dma_unmap_single(&pdev->dev, buffer_info->dma,
4294 buffer_info->length, DMA_FROM_DEVICE);
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00004295 buffer_info->dma = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 length = le16_to_cpu(rx_desc->length);
Neil Hormanea30e112009-06-02 01:29:58 -07004298 /* !EOP means multiple descriptors were used to store a single
Jesse Brandeburg40a14de2010-01-19 14:15:38 +00004299 * packet, if thats the case we need to toss it. In fact, we
4300 * to toss every packet with the EOP bit clear and the next
4301 * frame that _does_ have the EOP bit set, as it is by
4302 * definition only a frame fragment
4303 */
4304 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4305 adapter->discarding = true;
4306
4307 if (adapter->discarding) {
Jeff Kirshera1415ee2006-02-28 20:24:07 -08004308 /* All receives must fit into a single buffer */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004309 e_dbg("Receive packet consumed multiple buffers\n");
Auke Kok864c4e42006-06-27 09:06:53 -07004310 /* recycle */
Auke Kok8fc897b2006-08-28 14:56:16 -07004311 buffer_info->skb = skb;
Jesse Brandeburg40a14de2010-01-19 14:15:38 +00004312 if (status & E1000_RXD_STAT_EOP)
4313 adapter->discarding = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 goto next_desc;
4315 }
4316
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004317 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004318 u8 last_byte = *(skb->data + length - 1);
Joe Perches1dc32912008-07-11 15:17:08 -07004319 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4320 last_byte)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 spin_lock_irqsave(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004322 e1000_tbi_adjust_stats(hw, &adapter->stats,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323 length, skb->data);
4324 spin_unlock_irqrestore(&adapter->stats_lock,
4325 flags);
4326 length--;
4327 } else {
Ben Greeare825b732012-04-04 06:01:29 +00004328 if (netdev->features & NETIF_F_RXALL)
4329 goto process_skb;
Auke Kok9e2feac2006-04-14 19:05:18 -07004330 /* recycle */
4331 buffer_info->skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332 goto next_desc;
4333 }
Auke Kok1cb58212006-04-18 12:31:04 -07004334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335
Ben Greeare825b732012-04-04 06:01:29 +00004336process_skb:
Ben Greearb0d15622012-02-11 15:40:11 +00004337 total_rx_bytes += (length - 4); /* don't count FCS */
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004338 total_rx_packets++;
4339
Ben Greearb0d15622012-02-11 15:40:11 +00004340 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4341 /* adjust length to remove Ethernet CRC, this must be
4342 * done after the TBI_ACCEPT workaround above
4343 */
4344 length -= 4;
4345
Joe Perches57bf6ee2010-05-13 15:26:17 +00004346 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4347
Auke Kok996695d2006-11-01 08:47:50 -08004348 skb_put(skb, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349
4350 /* Receive Checksum Offload */
Jeff Kirshera292ca62006-01-12 16:51:30 -08004351 e1000_rx_checksum(adapter,
Joe Perches406874a2008-04-03 10:06:32 -07004352 (u32)(status) |
4353 ((u32)(rx_desc->errors) << 24),
David S. Millerc3d7a3a2006-03-15 14:26:28 -08004354 le16_to_cpu(rx_desc->csum), skb);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004355
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004356 e1000_receive_skb(adapter, status, rx_desc->special, skb);
Francois Romieuc3570ac2008-07-11 15:17:38 -07004357
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358next_desc:
4359 rx_desc->status = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004361 /* return some buffers to hardware, one at a time is too slow */
4362 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4363 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4364 cleaned_count = 0;
4365 }
4366
Jeff Kirsher30320be2006-03-02 18:21:57 -08004367 /* use prefetched values */
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004368 rx_desc = next_rxd;
4369 buffer_info = next_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004370 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 rx_ring->next_to_clean = i;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004372
4373 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4374 if (cleaned_count)
4375 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004377 adapter->total_rx_packets += total_rx_packets;
4378 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004379 netdev->stats.rx_bytes += total_rx_bytes;
4380 netdev->stats.rx_packets += total_rx_packets;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 return cleaned;
4382}
4383
4384/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004385 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4386 * @adapter: address of board private structure
4387 * @rx_ring: pointer to receive ring structure
4388 * @cleaned_count: number of buffers to allocate this pass
4389 **/
4390
4391static void
4392e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4393 struct e1000_rx_ring *rx_ring, int cleaned_count)
4394{
4395 struct net_device *netdev = adapter->netdev;
4396 struct pci_dev *pdev = adapter->pdev;
4397 struct e1000_rx_desc *rx_desc;
4398 struct e1000_buffer *buffer_info;
4399 struct sk_buff *skb;
4400 unsigned int i;
Eric Dumazet89d71a62009-10-13 05:34:20 +00004401 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004402
4403 i = rx_ring->next_to_use;
4404 buffer_info = &rx_ring->buffer_info[i];
4405
4406 while (cleaned_count--) {
4407 skb = buffer_info->skb;
4408 if (skb) {
4409 skb_trim(skb, 0);
4410 goto check_page;
4411 }
4412
Eric Dumazet89d71a62009-10-13 05:34:20 +00004413 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004414 if (unlikely(!skb)) {
4415 /* Better luck next round */
4416 adapter->alloc_rx_buff_failed++;
4417 break;
4418 }
4419
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004420 buffer_info->skb = skb;
4421 buffer_info->length = adapter->rx_buffer_len;
4422check_page:
4423 /* allocate a new page if necessary */
4424 if (!buffer_info->page) {
4425 buffer_info->page = alloc_page(GFP_ATOMIC);
4426 if (unlikely(!buffer_info->page)) {
4427 adapter->alloc_rx_buff_failed++;
4428 break;
4429 }
4430 }
4431
Anton Blanchardb5abb022010-02-19 17:54:53 +00004432 if (!buffer_info->dma) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004433 buffer_info->dma = dma_map_page(&pdev->dev,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004434 buffer_info->page, 0,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004435 buffer_info->length,
4436 DMA_FROM_DEVICE);
4437 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
Anton Blanchardb5abb022010-02-19 17:54:53 +00004438 put_page(buffer_info->page);
4439 dev_kfree_skb(skb);
4440 buffer_info->page = NULL;
4441 buffer_info->skb = NULL;
4442 buffer_info->dma = 0;
4443 adapter->alloc_rx_buff_failed++;
4444 break; /* while !buffer_info->skb */
4445 }
4446 }
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004447
4448 rx_desc = E1000_RX_DESC(*rx_ring, i);
4449 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4450
4451 if (unlikely(++i == rx_ring->count))
4452 i = 0;
4453 buffer_info = &rx_ring->buffer_info[i];
4454 }
4455
4456 if (likely(rx_ring->next_to_use != i)) {
4457 rx_ring->next_to_use = i;
4458 if (unlikely(i-- == 0))
4459 i = (rx_ring->count - 1);
4460
4461 /* Force memory writes to complete before letting h/w
4462 * know there are new descriptors to fetch. (Only
4463 * applicable for weak-ordered memory model archs,
4464 * such as IA-64). */
4465 wmb();
4466 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4467 }
4468}
4469
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: receive ring to refill
 * @cleaned_count: number of descriptors to refill this pass
 *
 * Allocates rx_buffer_len-sized skbs, DMA-maps them, and writes their
 * addresses into the hardware descriptors.  Implements the errata-23
 * workaround: a buffer may not cross a 64kB boundary, checked both on
 * the skb data pointer and again on the resulting DMA address.  Any
 * failure stops the loop and bumps alloc_rx_buff_failed for a retry.
 **/

static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse an emptied skb left in the slot by the clean path */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* second allocation may land badly too; then bail */
			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/*
		 * XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		/* publish the DMA address to the hardware descriptor */
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* RDT must reference the last written descriptor, one back */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
4589
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 *
 * Only acts on IGP PHYs autonegotiating with 1000-Full advertised.
 * Uses adapter->smartspeed as a step counter across invocations:
 * step 0 checks twice for a Master/Slave config fault and, if seen,
 * clears the manual master/slave enable and restarts autoneg; at
 * E1000_SMARTSPEED_DOWNSHIFT it re-enables master/slave mode and
 * restarts autoneg again; at E1000_SMARTSPEED_MAX the counter resets.
 **/

static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	/* workaround applies only to IGP PHYs advertising 1000-Full */
	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* drop manual master/slave config and renegotiate */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4644
4645/**
4646 * e1000_ioctl -
4647 * @netdev:
4648 * @ifreq:
4649 * @cmd:
4650 **/
4651
Joe Perches64798842008-07-11 15:17:02 -07004652static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653{
4654 switch (cmd) {
4655 case SIOCGMIIPHY:
4656 case SIOCGMIIREG:
4657 case SIOCSMIIREG:
4658 return e1000_mii_ioctl(netdev, ifr, cmd);
4659 default:
4660 return -EOPNOTSUPP;
4661 }
4662}
4663
/**
 * e1000_mii_ioctl - MII register access ioctls
 * @netdev: network interface device structure
 * @ifr: ioctl request carrying struct mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * Copper media only.  Register reads/writes are serialized with
 * stats_lock.  A write to PHY_CTRL additionally updates the driver's
 * speed/duplex/autoneg configuration and reinitializes (or resets)
 * the adapter; a write to the M88E1000 specific-control registers
 * triggers a PHY reset.  Returns E1000_SUCCESS (0) or a -errno.
 **/

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		/* serialize PHY access against the stats/watchdog paths */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		/* only 5-bit register numbers are valid */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				/* powering down the PHY: nothing to sync */
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* advertise all 10/100/1000 modes */
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					/* decode forced speed from the
					 * control register bits */
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				/* apply the new link configuration */
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				/* these take effect only after a PHY reset */
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			/* NOTE(review): unreachable — copper-only is
			 * enforced at function entry; kept as-is */
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
4761
Joe Perches64798842008-07-11 15:17:02 -07004762void e1000_pci_set_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763{
4764 struct e1000_adapter *adapter = hw->back;
Malli Chilakala26483452005-04-28 19:44:46 -07004765 int ret_val = pci_set_mwi(adapter->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004767 if (ret_val)
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004768 e_err(probe, "Error in setting MWI\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769}
4770
/* Disable PCI Memory-Write-Invalidate for the adapter's device;
 * counterpart of e1000_pci_set_mwi(), called from the shared hw code. */
void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}
4777
/* Read the PCI-X maximum memory read byte count for the adapter's
 * device via the PCI core (wrapper used by the shared hw code). */
int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}
4783
/* Set the PCI-X maximum memory read byte count for the adapter's
 * device via the PCI core (wrapper used by the shared hw code). */
void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}
4789
/* Write a 32-bit value to an x86-style I/O port for the shared hw code. */
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
4794
Jiri Pirko5622e402011-07-21 03:26:31 +00004795static bool e1000_vlan_used(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796{
Jiri Pirko5622e402011-07-21 03:26:31 +00004797 u16 vid;
4798
4799 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4800 return true;
4801 return false;
4802}
4803
Jiri Pirko52f55092012-03-20 18:10:01 +00004804static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4805 netdev_features_t features)
4806{
4807 struct e1000_hw *hw = &adapter->hw;
4808 u32 ctrl;
4809
4810 ctrl = er32(CTRL);
4811 if (features & NETIF_F_HW_VLAN_RX) {
4812 /* enable VLAN tag insert/strip */
4813 ctrl |= E1000_CTRL_VME;
4814 } else {
4815 /* disable VLAN tag insert/strip */
4816 ctrl &= ~E1000_CTRL_VME;
4817 }
4818 ew32(CTRL, ctrl);
4819}
/* Turn hardware VLAN receive filtering on or off.
 * Re-applies the current VLAN offload mode, then updates RCTL: when
 * enabling, clears CFIEN and sets VFE unless the interface is in
 * promiscuous mode, and refreshes the management VLAN; when disabling,
 * simply clears VFE.  Interrupts are masked around the register updates
 * unless the adapter is already down. */
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4848
/* netdev features hook: apply the VLAN offload (tag strip) setting
 * from @features, with interrupts masked unless the adapter is down.
 * The down-flag is deliberately re-tested before re-enabling, since it
 * may change while interrupts are masked. */
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4862
/* ndo_vlan_rx_add_vid: register VLAN @vid with the hardware.
 * The management VLAN is skipped when firmware manageability owns it.
 * Adding the first VLAN also turns on hardware VLAN filtering.  The
 * VFTA is a bitmap of 128 32-bit words: bits 11:5 of the vid select
 * the word, bits 4:0 the bit within it.  Always returns 0. */
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* leave the firmware-managed VLAN alone */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	/* first VLAN: switch hardware filtering on */
	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
4887
/* ndo_vlan_rx_kill_vid: deregister VLAN @vid from the hardware.
 * Clears the vid's bit in the VFTA (word = bits 11:5, bit = bits 4:0)
 * and, when the last VLAN goes away, turns hardware VLAN filtering off.
 * Always returns 0. */
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* NOTE(review): this disable/enable pair guards nothing between
	 * the two calls; it looks like a leftover from an earlier version
	 * that updated state here — confirm before removing */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	/* last VLAN gone: switch hardware filtering off */
	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
4912
/* Reprogram the hardware VLAN filter from active_vlans after a reset:
 * re-enables filtering and re-adds every recorded VLAN id.  No-op when
 * no VLANs are in use. */
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
4924
David Decotigny14ad2512011-04-27 18:32:43 +00004925int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004926{
Joe Perches1dc32912008-07-11 15:17:08 -07004927 struct e1000_hw *hw = &adapter->hw;
4928
4929 hw->autoneg = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930
David Decotigny14ad2512011-04-27 18:32:43 +00004931 /* Make sure dplx is at most 1 bit and lsb of speed is not set
4932 * for the switch() below to work */
4933 if ((spd & 1) || (dplx & ~1))
4934 goto err_inval;
4935
Malli Chilakala69213682005-06-17 17:44:20 -07004936 /* Fiber NICs only allow 1000 gbps Full duplex */
Joe Perches1dc32912008-07-11 15:17:08 -07004937 if ((hw->media_type == e1000_media_type_fiber) &&
David Decotigny14ad2512011-04-27 18:32:43 +00004938 spd != SPEED_1000 &&
4939 dplx != DUPLEX_FULL)
4940 goto err_inval;
Malli Chilakala69213682005-06-17 17:44:20 -07004941
David Decotigny14ad2512011-04-27 18:32:43 +00004942 switch (spd + dplx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943 case SPEED_10 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004944 hw->forced_speed_duplex = e1000_10_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945 break;
4946 case SPEED_10 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004947 hw->forced_speed_duplex = e1000_10_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004948 break;
4949 case SPEED_100 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004950 hw->forced_speed_duplex = e1000_100_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951 break;
4952 case SPEED_100 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004953 hw->forced_speed_duplex = e1000_100_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954 break;
4955 case SPEED_1000 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004956 hw->autoneg = 1;
4957 hw->autoneg_advertised = ADVERTISE_1000_FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958 break;
4959 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4960 default:
David Decotigny14ad2512011-04-27 18:32:43 +00004961 goto err_inval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962 }
Jesse Brandeburgc819bbd52012-07-26 02:31:09 +00004963
4964 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4965 hw->mdix = AUTO_ALL_MODES;
4966
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00004968
4969err_inval:
4970 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4971 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972}
4973
/* Common suspend/shutdown path: quiesce the adapter and arm Wake-on-LAN.
 * Brings the interface down if running, saves PCI state (CONFIG_PM only),
 * then either programs the wake-up filters (WUFC/WUC, all-multi for
 * multicast wake, D3Cold advertisement on 82540+, laser kept on for
 * fiber/serdes) or clears them when no wake events are requested.
 * *@enable_wake tells the caller whether PME should be armed — true when
 * any wake filter is set or manageability is enabled. Returns 0, or the
 * pci_save_state() error. */
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	/* no point waking on link change if the link is already up */
	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		/* arm PME and the requested wake-up filters */
		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
5056
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005057#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005058static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5059{
5060 int retval;
5061 bool wake;
5062
5063 retval = __e1000_shutdown(pdev, &wake);
5064 if (retval)
5065 return retval;
5066
5067 if (wake) {
5068 pci_prepare_to_sleep(pdev);
5069 } else {
5070 pci_wake_from_d3(pdev, false);
5071 pci_set_power_state(pdev, PCI_D3hot);
5072 }
5073
5074 return 0;
5075}
5076
Joe Perches64798842008-07-11 15:17:02 -07005077static int e1000_resume(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078{
5079 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07005080 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005081 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07005082 u32 err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083
Auke Kokd0e027d2006-04-14 19:04:40 -07005084 pci_set_power_state(pdev, PCI_D0);
Kok, Auke1d33e9c2007-02-16 14:39:28 -08005085 pci_restore_state(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +00005086 pci_save_state(pdev);
Taku Izumi81250292008-07-11 15:17:44 -07005087
5088 if (adapter->need_ioport)
5089 err = pci_enable_device(pdev);
5090 else
5091 err = pci_enable_device_mem(pdev);
Joe Perchesc7be73b2008-07-11 15:17:28 -07005092 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005093 pr_err("Cannot enable PCI device from suspend\n");
Auke Kok3d1dd8c2006-08-28 14:56:27 -07005094 return err;
5095 }
Malli Chilakalaa4cb8472005-04-28 19:41:28 -07005096 pci_set_master(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097
Auke Kokd0e027d2006-04-14 19:04:40 -07005098 pci_enable_wake(pdev, PCI_D3hot, 0);
5099 pci_enable_wake(pdev, PCI_D3cold, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100
Joe Perchesc7be73b2008-07-11 15:17:28 -07005101 if (netif_running(netdev)) {
5102 err = e1000_request_irq(adapter);
5103 if (err)
5104 return err;
5105 }
Auke Kokedd106f2006-11-06 08:57:12 -08005106
5107 e1000_power_up_phy(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005109 ew32(WUS, ~0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005111 e1000_init_manageability(adapter);
5112
Jesse Brandeburg96838a42006-01-18 13:01:39 -08005113 if (netif_running(netdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114 e1000_up(adapter);
5115
5116 netif_device_attach(netdev);
5117
Linus Torvalds1da177e2005-04-16 15:20:36 -07005118 return 0;
5119}
5120#endif
Auke Kokc653e632006-05-23 13:35:57 -07005121
5122static void e1000_shutdown(struct pci_dev *pdev)
5123{
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005124 bool wake;
5125
5126 __e1000_shutdown(pdev, &wake);
5127
5128 if (system_state == SYSTEM_POWER_OFF) {
5129 pci_wake_from_d3(pdev, wake);
5130 pci_set_power_state(pdev, PCI_D3hot);
5131 }
Auke Kokc653e632006-05-23 13:35:57 -07005132}
5133
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134#ifdef CONFIG_NET_POLL_CONTROLLER
5135/*
5136 * Polling 'interrupt' - used by things like netconsole to send skbs
5137 * without having to re-enable interrupts. It's not called while
5138 * the interrupt routine is executing.
5139 */
Joe Perches64798842008-07-11 15:17:02 -07005140static void e1000_netpoll(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005142 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kokd3d9e482006-07-14 16:14:23 -07005143
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144 disable_irq(adapter->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005145 e1000_intr(adapter->pdev->irq, netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 enable_irq(adapter->pdev->irq);
5147}
5148#endif
5149
Auke Kok90267292006-06-08 09:30:24 -07005150/**
5151 * e1000_io_error_detected - called when PCI error is detected
5152 * @pdev: Pointer to PCI device
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07005153 * @state: The current pci connection state
Auke Kok90267292006-06-08 09:30:24 -07005154 *
5155 * This function is called after a PCI bus error affecting
5156 * this device has been detected.
5157 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* keep the stack from touching the device during recovery */
	netif_device_detach(netdev);

	/* a permanent channel failure cannot be recovered from */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
5176
5177/**
5178 * e1000_io_slot_reset - called after the pci bus has been reset.
5179 * @pdev: Pointer to PCI device
5180 *
5181 * Restart the card from scratch, as if from a cold-boot. Implementation
5182 * resembles the first-half of the e1000_resume routine.
5183 */
5184static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5185{
5186 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005187 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005188 struct e1000_hw *hw = &adapter->hw;
Taku Izumi81250292008-07-11 15:17:44 -07005189 int err;
Auke Kok90267292006-06-08 09:30:24 -07005190
Taku Izumi81250292008-07-11 15:17:44 -07005191 if (adapter->need_ioport)
5192 err = pci_enable_device(pdev);
5193 else
5194 err = pci_enable_device_mem(pdev);
5195 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005196 pr_err("Cannot re-enable PCI device after reset.\n");
Auke Kok90267292006-06-08 09:30:24 -07005197 return PCI_ERS_RESULT_DISCONNECT;
5198 }
5199 pci_set_master(pdev);
5200
Linas Vepstasdbf38c92006-09-27 12:54:11 -07005201 pci_enable_wake(pdev, PCI_D3hot, 0);
5202 pci_enable_wake(pdev, PCI_D3cold, 0);
Auke Kok90267292006-06-08 09:30:24 -07005203
Auke Kok90267292006-06-08 09:30:24 -07005204 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005205 ew32(WUS, ~0);
Auke Kok90267292006-06-08 09:30:24 -07005206
5207 return PCI_ERS_RESULT_RECOVERED;
5208}
5209
5210/**
5211 * e1000_io_resume - called when traffic can start flowing again.
5212 * @pdev: Pointer to PCI device
5213 *
5214 * This callback is called when the error recovery driver tells us that
5215 * its OK to resume normal operation. Implementation resembles the
5216 * second-half of the e1000_resume routine.
5217 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	/* if the interface was up, restart it; bail out on failure so we
	 * never attach a device that could not be brought back up
	 */
	if (netif_running(netdev) && e1000_up(adapter)) {
		pr_info("can't bring device back up after reset\n");
		return;
	}

	netif_device_attach(netdev);
}
5234
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235/* e1000_main.c */