blob: 3bfbb8df898935f4acf47a52ebb8be3f0f683a67 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*******************************************************************************
2
Auke Kok0abb6eb2006-09-27 12:53:14 -07003 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 more details.
Auke Kok0abb6eb2006-09-27 12:53:14 -070014
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 You should have received a copy of the GNU General Public License along with
Auke Kok0abb6eb2006-09-27 12:53:14 -070016 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 Contact Information:
23 Linux NICS <linux.nics@intel.com>
Auke Kok3d41e302006-04-14 19:05:31 -070024 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
Andrew Mortond0bb53e2006-11-14 10:35:03 -050030#include <net/ip6_checksum.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000031#include <linux/io.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040032#include <linux/prefetch.h>
Jiri Pirko5622e402011-07-21 03:26:31 +000033#include <linux/bitops.h>
34#include <linux/if_vlan.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000035
/* Driver identification strings: the name doubles as the PCI driver name,
 * and DRV_VERSION is exported through MODULE_VERSION and printed at load.
 */
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
/* Entry points shared with other translation units (ethtool/param code). */
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
/* Per-ring resource setup/teardown helpers, local to this file. */
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

/* Module life cycle, probe/remove and net_device callbacks. */
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
/* Deferred work handlers (watchdog, PHY info, 82547 Tx FIFO workaround). */
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
/* Interrupt and NAPI polling paths. */
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

/* VLAN offload/filter management. */
static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif
183
/* Rx packets no larger than copybreak bytes are copied into a freshly
 * allocated skb instead of handing the original buffer up the stack;
 * tunable at module load or runtime via sysfs (mode 0644).
 */
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* PCI AER (advanced error reporting) recovery callbacks. */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
199};
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -0400200
/* PCI driver glue: binds the device-id table above to the probe/remove
 * and power-management entry points of this driver.
 */
static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Message-level mask used with netif_msg_*; -1 means "use default". */
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
224
225/**
Emil Tantilov675ad472010-04-27 14:02:58 +0000226 * e1000_get_hw_dev - return device
227 * used by hardware layer to print debugging information
228 *
229 **/
230struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231{
232 struct e1000_adapter *adapter = hw->back;
233 return adapter->netdev;
234}
235
236/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 * e1000_init_module - Driver Registration Routine
238 *
239 * e1000_init_module is the first routine called when the driver is
240 * loaded. All it does is register with the PCI subsystem.
241 **/
242
Joe Perches64798842008-07-11 15:17:02 -0700243static int __init e1000_init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244{
245 int ret;
Emil Tantilov675ad472010-04-27 14:02:58 +0000246 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247
Emil Tantilov675ad472010-04-27 14:02:58 +0000248 pr_info("%s\n", e1000_copyright);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249
Jeff Garzik29917622006-08-19 17:48:59 -0400250 ret = pci_register_driver(&e1000_driver);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100251 if (copybreak != COPYBREAK_DEFAULT) {
252 if (copybreak == 0)
Emil Tantilov675ad472010-04-27 14:02:58 +0000253 pr_info("copybreak disabled\n");
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100254 else
Emil Tantilov675ad472010-04-27 14:02:58 +0000255 pr_info("copybreak enabled for "
256 "packets <= %u bytes\n", copybreak);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 return ret;
259}
260
261module_init(e1000_init_module);
262
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	/* unregistering triggers remove() for every bound device */
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
276
Auke Kok2db10a02006-06-27 09:06:28 -0700277static int e1000_request_irq(struct e1000_adapter *adapter)
278{
279 struct net_device *netdev = adapter->netdev;
Al Viro3e188262007-12-11 19:49:39 +0000280 irq_handler_t handler = e1000_intr;
Auke Koke94bd232007-05-16 01:49:46 -0700281 int irq_flags = IRQF_SHARED;
282 int err;
Auke Kok2db10a02006-06-27 09:06:28 -0700283
Auke Koke94bd232007-05-16 01:49:46 -0700284 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285 netdev);
286 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700287 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
Auke Koke94bd232007-05-16 01:49:46 -0700288 }
Auke Kok2db10a02006-06-27 09:06:28 -0700289
290 return err;
291}
292
293static void e1000_free_irq(struct e1000_adapter *adapter)
294{
295 struct net_device *netdev = adapter->netdev;
296
297 free_irq(adapter->pdev->irq, netdev);
Auke Kok2db10a02006-06-27 09:06:28 -0700298}
299
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask every interrupt cause, then flush the posted write and
	 * wait for any handler already running to complete */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}
313
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* unmask the standard cause set and flush so it takes effect now */
	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100326
/**
 * e1000_update_mng_vlan - keep the manageability VLAN in the filter table
 * @adapter: board private structure
 *
 * Makes sure the VLAN id carried in the firmware's manageability cookie
 * stays registered while VLANs are in use, and drops a stale previous
 * manageability VLAN when it is no longer referenced anywhere.
 **/
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	/* nothing to track if no VLANs are configured at all */
	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		/* cookie VLAN not registered yet: add it only if firmware
		 * says VLAN support is part of the cookie status */
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* retire the old manageability vid if it changed and no one
		 * else (active_vlans) still uses it */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		/* already present in the filter table; just remember it */
		adapter->mng_vlan_id = vid;
	}
}
Jeff Kirsherb55ccb32006-01-12 16:50:30 -0800353
Joe Perches64798842008-07-11 15:17:02 -0700354static void e1000_init_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500355{
Joe Perches1dc32912008-07-11 15:17:08 -0700356 struct e1000_hw *hw = &adapter->hw;
357
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500358 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700359 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500360
361 /* disable hardware interception of ARP */
362 manc &= ~(E1000_MANC_ARP_EN);
363
Joe Perches1dc32912008-07-11 15:17:08 -0700364 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500365 }
366}
367
Joe Perches64798842008-07-11 15:17:02 -0700368static void e1000_release_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500369{
Joe Perches1dc32912008-07-11 15:17:08 -0700370 struct e1000_hw *hw = &adapter->hw;
371
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500372 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700373 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500374
375 /* re-enable hardware interception of ARP */
376 manc |= E1000_MANC_ARP_EN;
377
Joe Perches1dc32912008-07-11 15:17:08 -0700378 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500379 }
380}
381
Auke Koke0aac5a2007-03-06 08:57:21 -0800382/**
383 * e1000_configure - configure the hardware for RX and TX
384 * @adapter = private board structure
385 **/
386static void e1000_configure(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387{
388 struct net_device *netdev = adapter->netdev;
Auke Kok2db10a02006-06-27 09:06:28 -0700389 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390
Patrick McHardydb0ce502007-11-13 20:54:59 -0800391 e1000_set_rx_mode(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392
393 e1000_restore_vlan(adapter);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500394 e1000_init_manageability(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395
396 e1000_configure_tx(adapter);
397 e1000_setup_rctl(adapter);
398 e1000_configure_rx(adapter);
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800399 /* call E1000_DESC_UNUSED which always leaves
400 * at least 1 descriptor unused to make sure
401 * next_to_use != next_to_clean */
Jeff Kirsherf56799e2006-01-12 16:50:39 -0800402 for (i = 0; i < adapter->num_rx_queues; i++) {
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800403 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
Jeff Kirshera292ca62006-01-12 16:51:30 -0800404 adapter->alloc_rx_buf(adapter, ring,
405 E1000_DESC_UNUSED(ring));
Jeff Kirsherf56799e2006-01-12 16:50:39 -0800406 }
Auke Koke0aac5a2007-03-06 08:57:21 -0800407}
Jeff Kirsher7bfa4812006-01-12 16:50:41 -0800408
/**
 * e1000_up - bring the interface fully up after a (re)configuration
 * @adapter: board private structure
 *
 * Assumes the hardware has just been reset; reprograms it, re-enables
 * NAPI and interrupts, and kicks the watchdog via a link-change event.
 * Always returns 0.
 **/
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
428
Auke Kok79f05bf2006-06-27 09:06:32 -0700429/**
430 * e1000_power_up_phy - restore link in case the phy was powered down
431 * @adapter: address of board private structure
432 *
433 * The phy may be powered down to save power and turn off link when the
434 * driver is unloaded and wake on lan is not enabled (among others)
435 * *** this routine MUST be followed by a call to e1000_reset ***
436 *
437 **/
438
Jesse Brandeburgd6582662006-08-16 13:31:33 -0700439void e1000_power_up_phy(struct e1000_adapter *adapter)
Auke Kok79f05bf2006-06-27 09:06:32 -0700440{
Joe Perches1dc32912008-07-11 15:17:08 -0700441 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700442 u16 mii_reg = 0;
Auke Kok79f05bf2006-06-27 09:06:32 -0700443
444 /* Just clear the power down bit to wake the phy back up */
Joe Perches1dc32912008-07-11 15:17:08 -0700445 if (hw->media_type == e1000_media_type_copper) {
Auke Kok79f05bf2006-06-27 09:06:32 -0700446 /* according to the manual, the phy will retain its
447 * settings across a power-down/up cycle */
Joe Perches1dc32912008-07-11 15:17:08 -0700448 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700449 mii_reg &= ~MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700450 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700451 }
452}
453
/**
 * e1000_power_down_phy - power the copper PHY down when it is safe to do so
 * @adapter: board private structure
 **/
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		/* only these MAC families may power the PHY down, and even
		 * then not while the management/SMBus path is using it */
		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			/* unknown/unsupported MAC: leave the PHY alone */
			goto out;
		}
		/* set the power-down bit in PHY control */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
492
/**
 * e1000_down_and_stop - mark the adapter down and cancel deferred work
 * @adapter: board private structure
 *
 * Sets __E1000_DOWN (which stops work items from rescheduling themselves)
 * and then synchronously cancels all outstanding work.
 **/
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
505
/**
 * e1000_down - quiesce the interface
 * @adapter: board private structure
 *
 * Disables Rx then Tx in hardware, drains NAPI and interrupts, stops all
 * deferred work, and finally resets the chip and frees ring contents.
 * The ordering below is deliberate; see the inline comments.
 **/
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;


	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	/* report link loss to the stack */
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547
/**
 * e1000_reinit_safe - restart the interface from process context
 * @adapter: board private structure
 *
 * Variant of e1000_reinit_locked() for callers that do not hold rtnl;
 * serializes against other resets via __E1000_RESETTING and against
 * concurrent users via adapter->mutex.
 **/
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	/* spin (sleeping) until we own the RESETTING bit */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
558
/**
 * e1000_reinit_locked - restart the interface; caller must hold rtnl
 * @adapter: board private structure
 **/
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	/* wait until any in-flight reset finishes, then claim the bit */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
570
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	/* Pick the base packet-buffer allocation (PBA, in KB) per MAC type.
	 * Older MACs use the fixed "legacy" adjustment scheme below instead
	 * of the dynamic Tx/Rx split computed for jumbo frames. */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		/* not real MAC types; nothing to allocate */
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			/* 82547 keeps a software-managed Tx FIFO above the
			 * Rx allocation; reset its bookkeeping here */
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;	/* bytes -> KB */
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;	/* bytes -> KB */

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);	/* clear Wake-Up Control after the reset */

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
723
Ben Hutchings1aa8b472012-07-10 10:56:59 +0000724/* Dump the eeprom for users having checksum issues */
Adrian Bunkb4ea8952008-02-01 08:21:28 -0800725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
Auke Kok67b3c272007-12-17 13:50:23 -0800726{
727 struct net_device *netdev = adapter->netdev;
728 struct ethtool_eeprom eeprom;
729 const struct ethtool_ops *ops = netdev->ethtool_ops;
730 u8 *data;
731 int i;
732 u16 csum_old, csum_new = 0;
733
734 eeprom.len = ops->get_eeprom_len(netdev);
735 eeprom.offset = 0;
736
737 data = kmalloc(eeprom.len, GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +0000738 if (!data)
Auke Kok67b3c272007-12-17 13:50:23 -0800739 return;
Auke Kok67b3c272007-12-17 13:50:23 -0800740
741 ops->get_eeprom(netdev, &eeprom, data);
742
743 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
744 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
745 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
746 csum_new += data[i] + (data[i + 1] << 8);
747 csum_new = EEPROM_SUM - csum_new;
748
Emil Tantilov675ad472010-04-27 14:02:58 +0000749 pr_err("/*********************/\n");
750 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
751 pr_err("Calculated : 0x%04x\n", csum_new);
Auke Kok67b3c272007-12-17 13:50:23 -0800752
Emil Tantilov675ad472010-04-27 14:02:58 +0000753 pr_err("Offset Values\n");
754 pr_err("======== ======\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800755 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
756
Emil Tantilov675ad472010-04-27 14:02:58 +0000757 pr_err("Include this output when contacting your support provider.\n");
758 pr_err("This is not a software error! Something bad happened to\n");
759 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
760 pr_err("result in further problems, possibly loss of data,\n");
761 pr_err("corruption or system hangs!\n");
762 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
763 pr_err("which is invalid and requires you to set the proper MAC\n");
764 pr_err("address manually before continuing to enable this network\n");
765 pr_err("device. Please inspect the EEPROM dump and report the\n");
766 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
767 pr_err("/*********************/\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800768
769 kfree(data);
770}
771
772/**
Taku Izumi81250292008-07-11 15:17:44 -0700773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
774 * @pdev: PCI device information struct
775 *
776 * Return true if an adapter needs ioport resources
777 **/
778static int e1000_is_need_ioport(struct pci_dev *pdev)
779{
780 switch (pdev->device) {
781 case E1000_DEV_ID_82540EM:
782 case E1000_DEV_ID_82540EM_LOM:
783 case E1000_DEV_ID_82540EP:
784 case E1000_DEV_ID_82540EP_LOM:
785 case E1000_DEV_ID_82540EP_LP:
786 case E1000_DEV_ID_82541EI:
787 case E1000_DEV_ID_82541EI_MOBILE:
788 case E1000_DEV_ID_82541ER:
789 case E1000_DEV_ID_82541ER_LOM:
790 case E1000_DEV_ID_82541GI:
791 case E1000_DEV_ID_82541GI_LF:
792 case E1000_DEV_ID_82541GI_MOBILE:
793 case E1000_DEV_ID_82544EI_COPPER:
794 case E1000_DEV_ID_82544EI_FIBER:
795 case E1000_DEV_ID_82544GC_COPPER:
796 case E1000_DEV_ID_82544GC_LOM:
797 case E1000_DEV_ID_82545EM_COPPER:
798 case E1000_DEV_ID_82545EM_FIBER:
799 case E1000_DEV_ID_82546EB_COPPER:
800 case E1000_DEV_ID_82546EB_FIBER:
801 case E1000_DEV_ID_82546EB_QUAD_COPPER:
802 return true;
803 default:
804 return false;
805 }
806}
807
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000808static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +0000810{
811 /*
812 * Since there is no support for separate rx/tx vlan accel
813 * enable/disable make sure tx flag is always in same state as rx.
814 */
815 if (features & NETIF_F_HW_VLAN_RX)
816 features |= NETIF_F_HW_VLAN_TX;
817 else
818 features &= ~NETIF_F_HW_VLAN_TX;
819
820 return features;
821}
822
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000823static int e1000_set_features(struct net_device *netdev,
824 netdev_features_t features)
Michał Mirosławe97d3202011-06-08 08:36:42 +0000825{
826 struct e1000_adapter *adapter = netdev_priv(netdev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000827 netdev_features_t changed = features ^ netdev->features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000828
Jiri Pirko5622e402011-07-21 03:26:31 +0000829 if (changed & NETIF_F_HW_VLAN_RX)
830 e1000_vlan_mode(netdev, features);
831
Ben Greeare825b732012-04-04 06:01:29 +0000832 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
Michał Mirosławe97d3202011-06-08 08:36:42 +0000833 return 0;
834
Ben Greeare825b732012-04-04 06:01:29 +0000835 netdev->features = features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000836 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
837
838 if (netif_running(netdev))
839 e1000_reinit_locked(adapter);
840 else
841 e1000_reset(adapter);
842
843 return 0;
844}
845
/* net_device callback table wired up in e1000_probe() */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
865
/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	/* frame-size bounds derived from the current MTU */
	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		/* these MACs need the PHY init script run at reset */
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}
929
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	/* find the first I/O-port BAR, if this device needs one */
	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	/* temporary name until register_netdev() assigns ethN */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	/* CE4100 exposes its MDIO interface through a second BAR */
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	/* TSO is not supported on the 82547 (see the Tx FIFO workaround) —
	 * NOTE(review): rationale inferred from the exclusion below; confirm */
	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporary
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");


	/* deferred work: watchdog, 82547 FIFO-stall recovery, PHY info,
	 * and the reset task used from error paths */
	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		/* no APM support on these MACs */
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* dual-port parts: port B has its own control word */
		if (er32(STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address: probe all 32 addresses until PHY_ID2
	 * reads back something other than 0x0000/0x00FF */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

	/* error unwind: each label releases what was acquired after the
	 * previous one; order mirrors the acquisition order above */
err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	/* NOTE(review): ce4100_gbe_mdio_base_virt is NULL on non-CE4100
	 * parts (netdev priv is zeroed); iounmap(NULL) is relied on to be
	 * harmless here — confirm on all supported arches */
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
1265
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* stop traffic and cancel deferred work before tearing down */
	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* unmap in reverse order of e1000_probe()'s mappings */
	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
1303
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 *
 * Returns 0 on success, -ENOMEM if the ring allocation fails.
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	/* this driver only ever uses a single Tx/Rx queue pair */
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	/* interface starts administratively down until e1000_open() */
	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
1334
1335/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001336 * e1000_alloc_queues - Allocate memory for all rings
1337 * @adapter: board private structure to initialize
1338 *
1339 * We allocate one ring per queue at run-time since we don't know the
Wang Chen3e1d7cd2008-12-03 22:07:10 -08001340 * number of queues at compile-time.
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001341 **/
1342
Joe Perches64798842008-07-11 15:17:02 -07001343static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001344{
Yan Burman1c7e5b12007-03-06 08:58:04 -08001345 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1346 sizeof(struct e1000_tx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001347 if (!adapter->tx_ring)
1348 return -ENOMEM;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001349
Yan Burman1c7e5b12007-03-06 08:58:04 -08001350 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1351 sizeof(struct e1000_rx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001352 if (!adapter->rx_ring) {
1353 kfree(adapter->tx_ring);
1354 return -ENOMEM;
1355 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001356
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001357 return E1000_SUCCESS;
1358}
1359
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	/* pick up the manageability VLAN id if the firmware asked for one */
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* unwind in reverse order of acquisition */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1437
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* close must not run concurrently with a reset in progress */
	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}
1473
1474/**
1475 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1476 * @adapter: address of board private structure
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001477 * @start: address of beginning of memory
1478 * @len: length of memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 **/
Joe Perches64798842008-07-11 15:17:02 -07001480static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1481 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482{
Joe Perches1dc32912008-07-11 15:17:08 -07001483 struct e1000_hw *hw = &adapter->hw;
Joe Perchese982f172008-07-11 15:17:18 -07001484 unsigned long begin = (unsigned long)start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 unsigned long end = begin + len;
1486
Malli Chilakala26483452005-04-28 19:44:46 -07001487 /* First rev 82545 and 82546 need to not allow any memory
1488 * write location to cross 64k boundary due to errata 23 */
Joe Perches1dc32912008-07-11 15:17:08 -07001489 if (hw->mac_type == e1000_82545 ||
Dirk Brandewie5377a412011-01-06 14:29:54 +00001490 hw->mac_type == e1000_ce4100 ||
Joe Perches1dc32912008-07-11 15:17:08 -07001491 hw->mac_type == e1000_82546) {
Joe Perchesc3033b02008-03-21 11:06:25 -07001492 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 }
1494
Joe Perchesc3033b02008-03-21 11:06:25 -07001495 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496}
1497
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	/* software-side state: one e1000_buffer per hardware descriptor */
	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		/* NOTE: this label sits inside the error branch; the retry
		 * logic below jumps back here so that buffer_info is always
		 * released on a terminal allocation failure. */
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous: keeping the old
		 * block allocated makes the allocator give us a different
		 * (hopefully aligned) region */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up: free both attempts and bail out */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1574
1575/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001576 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1577 * (Descriptors) for all queues
1578 * @adapter: board private structure
1579 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001580 * Return 0 on success, negative on failure
1581 **/
1582
Joe Perches64798842008-07-11 15:17:02 -07001583int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001584{
1585 int i, err = 0;
1586
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001587 for (i = 0; i < adapter->num_tx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001588 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1589 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001590 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001591 for (i-- ; i >= 0; i--)
1592 e1000_free_tx_resources(adapter,
1593 &adapter->tx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001594 break;
1595 }
1596 }
1597
1598 return err;
1599}
1600
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		/* program ring base (split into high/low 32 bits),
		 * length, and reset head/tail to descriptor 0 */
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* 82542 uses legacy register offsets for head/tail */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	/* 82542 rev2.x needs its own IPG receive-timer values */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	/* the absolute-delay register only exists from 82540 on */
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* older MACs use RPS, newer ones use RS for status write-back */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);

}
1692
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	/* software-side state: one e1000_buffer per hardware descriptor */
	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		/* NOTE: this label sits inside the error branch; the retry
		 * logic below jumps back here so that buffer_info is always
		 * released on a terminal allocation failure. */
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous: keeping the old
		 * block allocated makes the allocator give us a different
		 * (hopefully aligned) region */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up: free both attempts and bail out */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
1774
1775/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001776 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1777 * (Descriptors) for all queues
1778 * @adapter: board private structure
1779 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001780 * Return 0 on success, negative on failure
1781 **/
1782
Joe Perches64798842008-07-11 15:17:02 -07001783int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001784{
1785 int i, err = 0;
1786
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001787 for (i = 0; i < adapter->num_rx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001788 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1789 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001790 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001791 for (i-- ; i >= 0; i--)
1792 e1000_free_rx_resources(adapter,
1793 &adapter->rx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001794 break;
1795 }
1796 }
1797
1798 return err;
1799}
1800
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 *
 * Builds the RCTL value from the adapter state and writes it to hardware.
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before re-programming it */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* accept bad packets when the TBI compatibility workaround is on */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long-packet reception only for jumbo MTUs */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes; BSEX selects the extended (>2048) sizes
	 * and is cleared again for the default 2048-byte case */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1866
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	/* Select Rx clean/alloc handlers: the jumbo variants are used
	 * whenever the MTU exceeds a standard Ethernet payload. Both
	 * branches compute the same descriptor ring length. */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* 82540 and newer also have an absolute delay and an interrupt
	 * throttle register */
	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			/* NOTE(review): itr appears to be interrupts/sec and
			 * the ITR register in 256ns units — confirm vs. spec */
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		/* pre-82543 parts (82542) use a different register layout */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
1935
1936/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001937 * e1000_free_tx_resources - Free Tx Resources per Queue
1938 * @adapter: board private structure
1939 * @tx_ring: Tx descriptor ring for a specific queue
1940 *
1941 * Free all transmit software resources
1942 **/
1943
Joe Perches64798842008-07-11 15:17:02 -07001944static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1945 struct e1000_tx_ring *tx_ring)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001946{
1947 struct pci_dev *pdev = adapter->pdev;
1948
1949 e1000_clean_tx_ring(adapter, tx_ring);
1950
1951 vfree(tx_ring->buffer_info);
1952 tx_ring->buffer_info = NULL;
1953
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001954 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1955 tx_ring->dma);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001956
1957 tx_ring->desc = NULL;
1958}
1959
1960/**
1961 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 * @adapter: board private structure
1963 *
1964 * Free all transmit software resources
1965 **/
1966
Joe Perches64798842008-07-11 15:17:02 -07001967void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968{
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001969 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001971 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001972 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973}
1974
Joe Perches64798842008-07-11 15:17:02 -07001975static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1976 struct e1000_buffer *buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977{
Alexander Duyck602c0552009-12-02 16:46:00 +00001978 if (buffer_info->dma) {
1979 if (buffer_info->mapped_as_page)
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001980 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1981 buffer_info->length, DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001982 else
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001983 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
Alexander Duyck602c0552009-12-02 16:46:00 +00001984 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001985 DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001986 buffer_info->dma = 0;
1987 }
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001988 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 dev_kfree_skb_any(buffer_info->skb);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001990 buffer_info->skb = NULL;
1991 }
Alexander Duyck37e73df2009-03-25 21:58:45 +00001992 buffer_info->time_stamp = 0;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001993 /* buffer_info must be completely set up in the transmit path */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994}
1995
1996/**
1997 * e1000_clean_tx_ring - Free Tx Buffers
1998 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001999 * @tx_ring: ring to be cleaned
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 **/
2001
Joe Perches64798842008-07-11 15:17:02 -07002002static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2003 struct e1000_tx_ring *tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004{
Joe Perches1dc32912008-07-11 15:17:08 -07002005 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 struct e1000_buffer *buffer_info;
2007 unsigned long size;
2008 unsigned int i;
2009
2010 /* Free all the Tx ring sk_buffs */
2011
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002012 for (i = 0; i < tx_ring->count; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 buffer_info = &tx_ring->buffer_info[i];
2014 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2015 }
2016
2017 size = sizeof(struct e1000_buffer) * tx_ring->count;
2018 memset(tx_ring->buffer_info, 0, size);
2019
2020 /* Zero out the descriptor ring */
2021
2022 memset(tx_ring->desc, 0, tx_ring->size);
2023
2024 tx_ring->next_to_use = 0;
2025 tx_ring->next_to_clean = 0;
Rusty Russell3db1cd52011-12-19 13:56:45 +00002026 tx_ring->last_tx_tso = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Joe Perches1dc32912008-07-11 15:17:08 -07002028 writel(0, hw->hw_addr + tx_ring->tdh);
2029 writel(0, hw->hw_addr + tx_ring->tdt);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002030}
2031
2032/**
2033 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2034 * @adapter: board private structure
2035 **/
2036
Joe Perches64798842008-07-11 15:17:02 -07002037static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002038{
2039 int i;
2040
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002041 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002042 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043}
2044
2045/**
2046 * e1000_free_rx_resources - Free Rx Resources
2047 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002048 * @rx_ring: ring to clean the resources from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 *
2050 * Free all receive software resources
2051 **/
2052
Joe Perches64798842008-07-11 15:17:02 -07002053static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2054 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 struct pci_dev *pdev = adapter->pdev;
2057
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002058 e1000_clean_rx_ring(adapter, rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
2060 vfree(rx_ring->buffer_info);
2061 rx_ring->buffer_info = NULL;
2062
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002063 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2064 rx_ring->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
2066 rx_ring->desc = NULL;
2067}
2068
2069/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002070 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002072 *
2073 * Free all receive software resources
2074 **/
2075
Joe Perches64798842008-07-11 15:17:02 -07002076void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002077{
2078 int i;
2079
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002080 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002081 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2082}
2083
2084/**
2085 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2086 * @adapter: board private structure
2087 * @rx_ring: ring to free buffers from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 **/
2089
Joe Perches64798842008-07-11 15:17:02 -07002090static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2091 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092{
Joe Perches1dc32912008-07-11 15:17:08 -07002093 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 struct e1000_buffer *buffer_info;
2095 struct pci_dev *pdev = adapter->pdev;
2096 unsigned long size;
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07002097 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
2099 /* Free all the Rx ring sk_buffs */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002100 for (i = 0; i < rx_ring->count; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 buffer_info = &rx_ring->buffer_info[i];
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002102 if (buffer_info->dma &&
2103 adapter->clean_rx == e1000_clean_rx_irq) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002104 dma_unmap_single(&pdev->dev, buffer_info->dma,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002105 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002106 DMA_FROM_DEVICE);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002107 } else if (buffer_info->dma &&
2108 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002109 dma_unmap_page(&pdev->dev, buffer_info->dma,
2110 buffer_info->length,
2111 DMA_FROM_DEVICE);
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00002112 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00002114 buffer_info->dma = 0;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002115 if (buffer_info->page) {
2116 put_page(buffer_info->page);
2117 buffer_info->page = NULL;
2118 }
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00002119 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 dev_kfree_skb(buffer_info->skb);
2121 buffer_info->skb = NULL;
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08002122 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 }
2124
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002125 /* there also may be some cached data from a chained receive */
2126 if (rx_ring->rx_skb_top) {
2127 dev_kfree_skb(rx_ring->rx_skb_top);
2128 rx_ring->rx_skb_top = NULL;
2129 }
2130
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 size = sizeof(struct e1000_buffer) * rx_ring->count;
2132 memset(rx_ring->buffer_info, 0, size);
2133
2134 /* Zero out the descriptor ring */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 memset(rx_ring->desc, 0, rx_ring->size);
2136
2137 rx_ring->next_to_clean = 0;
2138 rx_ring->next_to_use = 0;
2139
Joe Perches1dc32912008-07-11 15:17:08 -07002140 writel(0, hw->hw_addr + rx_ring->rdh);
2141 writel(0, hw->hw_addr + rx_ring->rdt);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002142}
2143
2144/**
2145 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2146 * @adapter: board private structure
2147 **/
2148
Joe Perches64798842008-07-11 15:17:02 -07002149static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002150{
2151 int i;
2152
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002153 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002154 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155}
2156
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* MWI must be off while the receive unit is held in reset */
	e1000_pci_clear_mwi(hw);

	/* assert the receiver reset bit and make sure it reaches hw */
	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the reset time to take effect before touching Rx state */
	mdelay(5);

	/* Rx is dead now; drop any buffers still queued on the rings */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2177
/* Counterpart to e1000_enter_82542_rst(): take the 82542 rev2 receive
 * unit back out of reset, restore MWI if the PCI command word had it
 * enabled, and re-establish the Rx configuration/buffers. */
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* deassert the receiver reset bit and make sure it reaches hw */
	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the receiver time to come back up */
	mdelay(5);

	/* only re-enable MWI if it was enabled before entering reset */
	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2200
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	/* reject multicast/zero addresses before touching hardware */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* keep netdev and hw copies of the MAC in sync */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	/* program the station address into receive address register 0 */
	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
2233
2234/**
Patrick McHardydb0ce502007-11-13 20:54:59 -08002235 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 * @netdev: network interface device structure
2237 *
Patrick McHardydb0ce502007-11-13 20:54:59 -08002238 * The set_rx_mode entry point is called whenever the unicast or multicast
2239 * address lists or the network interface flags are updated. This routine is
2240 * responsible for configuring the hardware for proper unicast, multicast,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 * promiscuous mode, and all-multi behavior.
2242 **/
2243
Joe Perches64798842008-07-11 15:17:02 -07002244static void e1000_set_rx_mode(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245{
Malli Chilakala60490fe2005-06-17 17:41:45 -07002246 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 struct e1000_hw *hw = &adapter->hw;
Jiri Pirkoccffad252009-05-22 23:22:17 +00002248 struct netdev_hw_addr *ha;
2249 bool use_uc = false;
Joe Perches406874a2008-04-03 10:06:32 -07002250 u32 rctl;
2251 u32 hash_value;
Mallikarjuna R Chilakala868d5302005-10-04 06:58:59 -04002252 int i, rar_entries = E1000_RAR_ENTRIES;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002253 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002254 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2255
2256 if (!mcarray) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07002257 e_err(probe, "memory allocation failed\n");
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002258 return;
2259 }
Auke Kokcd94dd02006-06-27 09:08:22 -07002260
Malli Chilakala26483452005-04-28 19:44:46 -07002261 /* Check for Promiscuous and All Multicast modes */
2262
Joe Perches1dc32912008-07-11 15:17:08 -07002263 rctl = er32(RCTL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002265 if (netdev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002267 rctl &= ~E1000_RCTL_VFE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 } else {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002269 if (netdev->flags & IFF_ALLMULTI)
Patrick McHardy746b9f02008-07-16 20:15:45 -07002270 rctl |= E1000_RCTL_MPE;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002271 else
Patrick McHardy746b9f02008-07-16 20:15:45 -07002272 rctl &= ~E1000_RCTL_MPE;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002273 /* Enable VLAN filter if there is a VLAN */
Jiri Pirko5622e402011-07-21 03:26:31 +00002274 if (e1000_vlan_used(adapter))
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002275 rctl |= E1000_RCTL_VFE;
Patrick McHardydb0ce502007-11-13 20:54:59 -08002276 }
2277
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002278 if (netdev_uc_count(netdev) > rar_entries - 1) {
Patrick McHardydb0ce502007-11-13 20:54:59 -08002279 rctl |= E1000_RCTL_UPE;
2280 } else if (!(netdev->flags & IFF_PROMISC)) {
2281 rctl &= ~E1000_RCTL_UPE;
Jiri Pirkoccffad252009-05-22 23:22:17 +00002282 use_uc = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 }
2284
Joe Perches1dc32912008-07-11 15:17:08 -07002285 ew32(RCTL, rctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286
2287 /* 82542 2.0 needs to be in reset to write receive address registers */
2288
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002289 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 e1000_enter_82542_rst(adapter);
2291
Patrick McHardydb0ce502007-11-13 20:54:59 -08002292 /* load the first 14 addresses into the exact filters 1-14. Unicast
2293 * addresses take precedence to avoid disabling unicast filtering
2294 * when possible.
2295 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04002296 * RAR 0 is used for the station MAC address
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 * if there are not 14 addresses, go ahead and clear the filters
2298 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00002299 i = 1;
2300 if (use_uc)
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002301 netdev_for_each_uc_addr(ha, netdev) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00002302 if (i == rar_entries)
2303 break;
2304 e1000_rar_set(hw, ha->addr, i++);
2305 }
2306
Jiri Pirko22bedad32010-04-01 21:22:57 +00002307 netdev_for_each_mc_addr(ha, netdev) {
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002308 if (i == rar_entries) {
2309 /* load any remaining addresses into the hash table */
2310 u32 hash_reg, hash_bit, mta;
Jiri Pirko22bedad32010-04-01 21:22:57 +00002311 hash_value = e1000_hash_mc_addr(hw, ha->addr);
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002312 hash_reg = (hash_value >> 5) & 0x7F;
2313 hash_bit = hash_value & 0x1F;
2314 mta = (1 << hash_bit);
2315 mcarray[hash_reg] |= mta;
Jiri Pirko10886af2010-02-23 01:19:22 -08002316 } else {
Jiri Pirko22bedad32010-04-01 21:22:57 +00002317 e1000_rar_set(hw, ha->addr, i++);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 }
2319 }
2320
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002321 for (; i < rar_entries; i++) {
2322 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2323 E1000_WRITE_FLUSH();
2324 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2325 E1000_WRITE_FLUSH();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 }
2327
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002328 /* write the hash table completely, write from bottom to avoid
2329 * both stupid write combining chipsets, and flushing each write */
2330 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2331 /*
2332 * If we are on an 82544 has an errata where writing odd
2333 * offsets overwrites the previous even offset, but writing
2334 * backwards over the range solves the issue by always
2335 * writing the odd offset first
2336 */
2337 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2338 }
2339 E1000_WRITE_FLUSH();
2340
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002341 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 e1000_leave_82542_rst(adapter);
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002343
2344 kfree(mcarray);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345}
2346
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);
	/* bail out if the interface is being brought down */
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	/* serialize against other delayed-work users of the adapter */
	mutex_lock(&adapter->mutex);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	mutex_unlock(&adapter->mutex);
}
2365
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 *
 * Works around the 82547 Tx FIFO stall: once the hardware has fully
 * drained (Tx ring and on-chip FIFO head/tail/save registers all in
 * agreement), the FIFO pointers are rewound and the queue restarted;
 * otherwise the task reschedules itself to poll again.
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	/* bail out if the interface is being brought down */
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* hardware is quiescent only when ring head==tail and the
		 * FIFO head/tail and their save registers match */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			/* disable transmits, rewind all FIFO pointers to
			 * tx_head_addr, then re-enable */
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet: poll again on the next tick */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
	mutex_unlock(&adapter->mutex);
}
2404
/* Determine current link state for the adapter's media type.
 * Returns true when link is up. May trigger e1000_check_for_link(),
 * which can update hw state as a side effect. */
bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status will stay false until the
	 * e1000_check_for_link establishes link for copper adapters
	 * ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		/* ce4100 never gets the LSC interrupt path, so always
		 * force a fresh link check */
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			/* check_for_link clears get_link_status on success */
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		/* fiber: read link-up straight from the STATUS register */
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}
2441
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442/**
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002443 * e1000_watchdog - work function
2444 * @work: work struct contained inside adapter struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 **/
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002446static void e1000_watchdog(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447{
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002448 struct e1000_adapter *adapter = container_of(work,
2449 struct e1000_adapter,
2450 watchdog_task.work);
Joe Perches1dc32912008-07-11 15:17:08 -07002451 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 struct net_device *netdev = adapter->netdev;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002453 struct e1000_tx_ring *txdr = adapter->tx_ring;
Joe Perches406874a2008-04-03 10:06:32 -07002454 u32 link, tctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002456 if (test_bit(__E1000_DOWN, &adapter->flags))
2457 return;
2458
2459 mutex_lock(&adapter->mutex);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002460 link = e1000_has_link(adapter);
2461 if ((netif_carrier_ok(netdev)) && link)
2462 goto link_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002464 if (link) {
2465 if (!netif_carrier_ok(netdev)) {
Joe Perches406874a2008-04-03 10:06:32 -07002466 u32 ctrl;
Joe Perchesc3033b02008-03-21 11:06:25 -07002467 bool txb2b = true;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002468 /* update snapshot of PHY registers on LSC */
Joe Perches1dc32912008-07-11 15:17:08 -07002469 e1000_get_speed_and_duplex(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 &adapter->link_speed,
2471 &adapter->link_duplex);
2472
Joe Perches1dc32912008-07-11 15:17:08 -07002473 ctrl = er32(CTRL);
Emil Tantilov675ad472010-04-27 14:02:58 +00002474 pr_info("%s NIC Link is Up %d Mbps %s, "
2475 "Flow Control: %s\n",
2476 netdev->name,
2477 adapter->link_speed,
2478 adapter->link_duplex == FULL_DUPLEX ?
2479 "Full Duplex" : "Half Duplex",
2480 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2481 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2482 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2483 E1000_CTRL_TFCE) ? "TX" : "None")));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
Emil Tantilov39ca5f02010-03-26 11:25:58 +00002485 /* adjust timeout factor according to speed/duplex */
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002486 adapter->tx_timeout_factor = 1;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002487 switch (adapter->link_speed) {
2488 case SPEED_10:
Joe Perchesc3033b02008-03-21 11:06:25 -07002489 txb2b = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002490 adapter->tx_timeout_factor = 16;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002491 break;
2492 case SPEED_100:
Joe Perchesc3033b02008-03-21 11:06:25 -07002493 txb2b = false;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002494 /* maybe add some timeout factor ? */
2495 break;
2496 }
2497
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002498 /* enable transmits in the hardware */
Joe Perches1dc32912008-07-11 15:17:08 -07002499 tctl = er32(TCTL);
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002500 tctl |= E1000_TCTL_EN;
Joe Perches1dc32912008-07-11 15:17:08 -07002501 ew32(TCTL, tctl);
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002502
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 netif_carrier_on(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002504 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002505 schedule_delayed_work(&adapter->phy_info_task,
2506 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 adapter->smartspeed = 0;
2508 }
2509 } else {
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002510 if (netif_carrier_ok(netdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 adapter->link_speed = 0;
2512 adapter->link_duplex = 0;
Emil Tantilov675ad472010-04-27 14:02:58 +00002513 pr_info("%s NIC Link is Down\n",
2514 netdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 netif_carrier_off(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002516
2517 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002518 schedule_delayed_work(&adapter->phy_info_task,
2519 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 }
2521
2522 e1000_smartspeed(adapter);
2523 }
2524
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002525link_up:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 e1000_update_stats(adapter);
2527
Joe Perches1dc32912008-07-11 15:17:08 -07002528 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 adapter->tpt_old = adapter->stats.tpt;
Joe Perches1dc32912008-07-11 15:17:08 -07002530 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 adapter->colc_old = adapter->stats.colc;
2532
2533 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2534 adapter->gorcl_old = adapter->stats.gorcl;
2535 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2536 adapter->gotcl_old = adapter->stats.gotcl;
2537
Joe Perches1dc32912008-07-11 15:17:08 -07002538 e1000_update_adaptive(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002540 if (!netif_carrier_ok(netdev)) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002541 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 /* We've lost link, so the controller stops DMA,
2543 * but we've got queued Tx work that's never going
2544 * to get done, so reset controller to flush Tx.
2545 * (Do the reset outside of interrupt context). */
Jeff Kirsher87041632006-03-02 18:21:24 -08002546 adapter->tx_timeout_count++;
2547 schedule_work(&adapter->reset_task);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002548 /* exit immediately since reset is imminent */
2549 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 }
2551 }
2552
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002553 /* Simple mode for Interrupt Throttle Rate (ITR) */
2554 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2555 /*
2556 * Symmetric Tx/Rx gets a reduced ITR=2000;
2557 * Total asymmetrical Tx or Rx gets ITR=8000;
2558 * everyone else is between 2000-8000.
2559 */
2560 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2561 u32 dif = (adapter->gotcl > adapter->gorcl ?
2562 adapter->gotcl - adapter->gorcl :
2563 adapter->gorcl - adapter->gotcl) / 10000;
2564 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2565
2566 ew32(ITR, 1000000000 / (itr * 256));
2567 }
2568
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 /* Cause software interrupt to ensure rx ring is cleaned */
Joe Perches1dc32912008-07-11 15:17:08 -07002570 ew32(ICS, E1000_ICS_RXDMT0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571
Malli Chilakala26483452005-04-28 19:44:46 -07002572 /* Force detection of hung controller every watchdog period */
Joe Perchesc3033b02008-03-21 11:06:25 -07002573 adapter->detect_tx_hung = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002575 /* Reschedule the task */
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002576 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002577 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002578
2579unlock:
2580 mutex_unlock(&adapter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581}
2582
/* Traffic classes used by the dynamic interrupt-throttle (ITR) algorithm:
 * e1000_update_itr() classifies each measurement interval into one of these,
 * and e1000_set_itr() maps the class to an interrupt rate.
 */
enum latency_range {
	lowest_latency = 0,	/* mapped to 70000 ints/s by e1000_set_itr() */
	low_latency = 1,	/* mapped to 20000 ints/s */
	bulk_latency = 2,	/* mapped to 4000 ints/s */
	latency_invalid = 255
};
2589
2590/**
2591 * e1000_update_itr - update the dynamic ITR value based on statistics
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00002592 * @adapter: pointer to adapter
2593 * @itr_setting: current adapter->itr
2594 * @packets: the number of packets during this measurement interval
2595 * @bytes: the number of bytes during this measurement interval
2596 *
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002597 * Stores a new ITR value based on packets and byte
2598 * counts during the last interrupt. The advantage of per interrupt
2599 * computation is faster updates and more accurate ITR for the current
2600 * traffic pattern. Constants in this function were computed
2601 * based on theoretical maximum wire speed and thresholds were set based
2602 * on testing data as well as attempting to minimize response time
2603 * while increasing bulk throughput.
2604 * this functionality is controlled by the InterruptThrottleRate module
2605 * parameter (see e1000_param.c)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002606 **/
2607static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
Joe Perches64798842008-07-11 15:17:02 -07002608 u16 itr_setting, int packets, int bytes)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002609{
2610 unsigned int retval = itr_setting;
2611 struct e1000_hw *hw = &adapter->hw;
2612
2613 if (unlikely(hw->mac_type < e1000_82540))
2614 goto update_itr_done;
2615
2616 if (packets == 0)
2617 goto update_itr_done;
2618
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002619 switch (itr_setting) {
2620 case lowest_latency:
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002621 /* jumbo frames get bulk treatment*/
2622 if (bytes/packets > 8000)
2623 retval = bulk_latency;
2624 else if ((packets < 5) && (bytes > 512))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002625 retval = low_latency;
2626 break;
2627 case low_latency: /* 50 usec aka 20000 ints/s */
2628 if (bytes > 10000) {
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002629 /* jumbo frames need bulk latency setting */
2630 if (bytes/packets > 8000)
2631 retval = bulk_latency;
2632 else if ((packets < 10) || ((bytes/packets) > 1200))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002633 retval = bulk_latency;
2634 else if ((packets > 35))
2635 retval = lowest_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002636 } else if (bytes/packets > 2000)
2637 retval = bulk_latency;
2638 else if (packets <= 2 && bytes < 512)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002639 retval = lowest_latency;
2640 break;
2641 case bulk_latency: /* 250 usec aka 4000 ints/s */
2642 if (bytes > 25000) {
2643 if (packets > 35)
2644 retval = low_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002645 } else if (bytes < 6000) {
2646 retval = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002647 }
2648 break;
2649 }
2650
2651update_itr_done:
2652 return retval;
2653}
2654
2655static void e1000_set_itr(struct e1000_adapter *adapter)
2656{
2657 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07002658 u16 current_itr;
2659 u32 new_itr = adapter->itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002660
2661 if (unlikely(hw->mac_type < e1000_82540))
2662 return;
2663
2664 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2665 if (unlikely(adapter->link_speed != SPEED_1000)) {
2666 current_itr = 0;
2667 new_itr = 4000;
2668 goto set_itr_now;
2669 }
2670
2671 adapter->tx_itr = e1000_update_itr(adapter,
2672 adapter->tx_itr,
2673 adapter->total_tx_packets,
2674 adapter->total_tx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002675 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2676 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2677 adapter->tx_itr = low_latency;
2678
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002679 adapter->rx_itr = e1000_update_itr(adapter,
2680 adapter->rx_itr,
2681 adapter->total_rx_packets,
2682 adapter->total_rx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002683 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2684 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2685 adapter->rx_itr = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002686
2687 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2688
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002689 switch (current_itr) {
2690 /* counts and packets in update_itr are dependent on these numbers */
2691 case lowest_latency:
2692 new_itr = 70000;
2693 break;
2694 case low_latency:
2695 new_itr = 20000; /* aka hwitr = ~200 */
2696 break;
2697 case bulk_latency:
2698 new_itr = 4000;
2699 break;
2700 default:
2701 break;
2702 }
2703
2704set_itr_now:
2705 if (new_itr != adapter->itr) {
2706 /* this attempts to bias the interrupt rate towards Bulk
2707 * by adding intermediate steps when interrupt rate is
2708 * increasing */
2709 new_itr = new_itr > adapter->itr ?
2710 min(adapter->itr + (new_itr >> 2), new_itr) :
2711 new_itr;
2712 adapter->itr = new_itr;
Joe Perches1dc32912008-07-11 15:17:08 -07002713 ew32(ITR, 1000000000 / (new_itr * 256));
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002714 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002715}
2716
/* Per-packet option flags passed from e1000_xmit_frame() down to
 * e1000_tx_queue(); each flag selects descriptor command/option bits.
 */
#define E1000_TX_FLAGS_CSUM 0x00000001 /* request hw checksum insertion (TXSM) */
#define E1000_TX_FLAGS_VLAN 0x00000002 /* insert 802.1q VLAN tag (VLE) */
#define E1000_TX_FLAGS_TSO 0x00000004 /* TCP segmentation offload (TSE) */
#define E1000_TX_FLAGS_IPV4 0x00000008 /* IPv4 packet: also insert IP csum (IXSM) */
#define E1000_TX_FLAGS_NO_FCS 0x00000010 /* do not append Ethernet FCS (clears IFCS) */
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 /* VLAN tag lives in the upper 16 bits */
#define E1000_TX_FLAGS_VLAN_SHIFT 16
2724
Joe Perches64798842008-07-11 15:17:02 -07002725static int e1000_tso(struct e1000_adapter *adapter,
2726 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 struct e1000_context_desc *context_desc;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002729 struct e1000_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002731 u32 cmd_length = 0;
2732 u16 ipcse = 0, tucse, mss;
2733 u8 ipcss, ipcso, tucss, tucso, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 int err;
2735
Herbert Xu89114af2006-07-08 13:34:32 -07002736 if (skb_is_gso(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 if (skb_header_cloned(skb)) {
2738 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2739 if (err)
2740 return err;
2741 }
2742
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07002743 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
Herbert Xu79671682006-06-22 02:40:14 -07002744 mss = skb_shinfo(skb)->gso_size;
Alexey Dobriyan60828232006-05-23 14:52:21 -07002745 if (skb->protocol == htons(ETH_P_IP)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002746 struct iphdr *iph = ip_hdr(skb);
2747 iph->tot_len = 0;
2748 iph->check = 0;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002749 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2750 iph->daddr, 0,
2751 IPPROTO_TCP,
2752 0);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002753 cmd_length = E1000_TXD_CMD_IP;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07002754 ipcse = skb_transport_offset(skb) - 1;
Auke Koke15fdd02006-08-16 11:28:45 -07002755 } else if (skb->protocol == htons(ETH_P_IPV6)) {
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07002756 ipv6_hdr(skb)->payload_len = 0;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002757 tcp_hdr(skb)->check =
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07002758 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2759 &ipv6_hdr(skb)->daddr,
2760 0, IPPROTO_TCP, 0);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002761 ipcse = 0;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002762 }
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002763 ipcss = skb_network_offset(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002764 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07002765 tucss = skb_transport_offset(skb);
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002766 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 tucse = 0;
2768
2769 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002770 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002772 i = tx_ring->next_to_use;
2773 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002774 buffer_info = &tx_ring->buffer_info[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775
2776 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2777 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2778 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2779 context_desc->upper_setup.tcp_fields.tucss = tucss;
2780 context_desc->upper_setup.tcp_fields.tucso = tucso;
2781 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2782 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2783 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2784 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2785
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002786 buffer_info->time_stamp = jiffies;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08002787 buffer_info->next_to_watch = i;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002788
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002789 if (++i == tx_ring->count) i = 0;
2790 tx_ring->next_to_use = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791
Joe Perchesc3033b02008-03-21 11:06:25 -07002792 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 }
Joe Perchesc3033b02008-03-21 11:06:25 -07002794 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795}
2796
Joe Perches64798842008-07-11 15:17:02 -07002797static bool e1000_tx_csum(struct e1000_adapter *adapter,
2798 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799{
2800 struct e1000_context_desc *context_desc;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002801 struct e1000_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002803 u8 css;
Dave Graham3ed30672008-10-09 14:29:26 -07002804 u32 cmd_len = E1000_TXD_CMD_DEXT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805
Dave Graham3ed30672008-10-09 14:29:26 -07002806 if (skb->ip_summed != CHECKSUM_PARTIAL)
2807 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808
Dave Graham3ed30672008-10-09 14:29:26 -07002809 switch (skb->protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08002810 case cpu_to_be16(ETH_P_IP):
Dave Graham3ed30672008-10-09 14:29:26 -07002811 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2812 cmd_len |= E1000_TXD_CMD_TCP;
2813 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08002814 case cpu_to_be16(ETH_P_IPV6):
Dave Graham3ed30672008-10-09 14:29:26 -07002815 /* XXX not handling all IPV6 headers */
2816 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2817 cmd_len |= E1000_TXD_CMD_TCP;
2818 break;
2819 default:
2820 if (unlikely(net_ratelimit()))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07002821 e_warn(drv, "checksum_partial proto=%x!\n",
2822 skb->protocol);
Dave Graham3ed30672008-10-09 14:29:26 -07002823 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 }
2825
Michał Mirosław0d0b1672010-12-14 15:24:08 +00002826 css = skb_checksum_start_offset(skb);
Dave Graham3ed30672008-10-09 14:29:26 -07002827
2828 i = tx_ring->next_to_use;
2829 buffer_info = &tx_ring->buffer_info[i];
2830 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2831
2832 context_desc->lower_setup.ip_config = 0;
2833 context_desc->upper_setup.tcp_fields.tucss = css;
2834 context_desc->upper_setup.tcp_fields.tucso =
2835 css + skb->csum_offset;
2836 context_desc->upper_setup.tcp_fields.tucse = 0;
2837 context_desc->tcp_seg_setup.data = 0;
2838 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2839
2840 buffer_info->time_stamp = jiffies;
2841 buffer_info->next_to_watch = i;
2842
2843 if (unlikely(++i == tx_ring->count)) i = 0;
2844 tx_ring->next_to_use = i;
2845
2846 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847}
2848
/* Largest data buffer a single Tx descriptor may carry: 2^12 = 4096 bytes */
#define E1000_MAX_TXD_PWR 12
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2851
/**
 * e1000_tx_map - map skb data for DMA and fill the ring's buffer_info array
 * @adapter: board private structure
 * @tx_ring: ring the packet's buffers are placed on
 * @skb: packet whose linear data and paged fragments are to be mapped
 * @first: ring index of this packet's first descriptor (gets next_to_watch)
 * @max_per_txd: maximum bytes a single descriptor may carry
 * @nr_frags: number of paged fragments in @skb
 * @mss: TSO segment size (0 when not doing TSO)
 *
 * Splits the skb's linear head and each fragment into chunks of at most
 * @max_per_txd bytes, applying several hardware-erratum size adjustments,
 * and DMA-maps each chunk into consecutive buffer_info entries.
 *
 * Returns the number of buffer_info entries consumed, or 0 if a DMA
 * mapping failed (in which case all mappings made so far are undone).
 */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* map the linear portion of the skb */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		/* only advance the index when more head data remains, so
		 * that i stays on the last used entry after the loop */
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	/* map each paged fragment, same chunking and errata rules as above */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* the last entry owns the skb; the first points at the last so the
	 * clean-up path knows when the whole packet has been written back */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	/* the entry at i was never mapped; clear its stale dma handle */
	buffer_info->dma = 0;
	if (count)
		count--;

	/* unwind: walk backwards from i, unmapping and freeing every
	 * buffer_info that was successfully set up above */
	while (count--) {
		if (i==0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
2993
/**
 * e1000_tx_queue - write Tx descriptors for a mapped packet and ring the tail
 * @adapter: board private structure
 * @tx_ring: ring to post descriptors to
 * @tx_flags: E1000_TX_FLAGS_* options selected for this packet
 * @count: number of buffer_info entries (as returned by e1000_tx_map())
 *
 * Translates @tx_flags into per-descriptor command/option bits, writes one
 * descriptor per mapped buffer, then advances the hardware tail pointer.
 */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	/* txd_cmd (e.g. EOP/RS bits) is set only on the packet's last desc */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it syncronizes IO on IA64/Altix systems */
	mmiowb();
}
3056
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003057/* 82547 workaround to avoid controller hang in half-duplex environment.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 * The workaround is to avoid queuing a large packet that would span
3059 * the internal Tx FIFO ring boundary by notifying the stack to resend
3060 * the packet at a later time. This gives the Tx FIFO an opportunity to
3061 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3062 * to the beginning of the Tx FIFO.
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003063 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064
/* Per-packet overhead and alignment granularity used by the 82547 Tx FIFO
 * accounting in e1000_82547_fifo_workaround() */
#define E1000_FIFO_HDR 0x10
/* pad length used when deciding whether a packet would wrap the FIFO */
#define E1000_82547_PAD_LEN 0x3E0
3067
Joe Perches64798842008-07-11 15:17:02 -07003068static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3069 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070{
Joe Perches406874a2008-04-03 10:06:32 -07003071 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3072 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07003074 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003076 if (adapter->link_duplex != HALF_DUPLEX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003077 goto no_fifo_stall_required;
3078
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003079 if (atomic_read(&adapter->tx_fifo_stall))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 return 1;
3081
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003082 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 atomic_set(&adapter->tx_fifo_stall, 1);
3084 return 1;
3085 }
3086
3087no_fifo_stall_required:
3088 adapter->tx_fifo_head += skb_fifo_len;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003089 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3091 return 0;
3092}
3093
/* Slow path of e1000_maybe_stop_tx(): the ring looked too full, so stop the
 * queue and re-check under a memory barrier in case another CPU freed
 * descriptors concurrently.
 *
 * Returns 0 if room appeared and the queue was restarted, -EBUSY if the
 * queue remains stopped.
 */
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}
3115
/* Stop the Tx queue unless the ring has at least @size free descriptors.
 * Returns 0 when transmission may proceed, -EBUSY otherwise.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	/* take the slow path only when the ring looks too full */
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);

	return 0;
}
3123
/* descriptors needed for a buffer of size S given a max-buffer power X */
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
/**
 * e1000_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: socket buffer to send
 * @netdev: network interface device structure
 *
 * Counts the descriptors the skb will need (context, data, fragment and
 * errata-workaround descriptors), stops the queue and returns
 * NETDEV_TX_BUSY when the ring cannot hold them, otherwise sets up
 * TSO/checksum offload, maps the buffers and hands the descriptors to
 * the hardware.  Returns NETDEV_TX_OK when the skb was consumed
 * (queued or dropped).
 **/
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow. Right now, performance is impacted slightly negatively
	 * if using multiple tx queues. If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	tx_ring = adapter->tx_ring;

	/* empty skbs are dropped, not transmitted */
	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer. The calc is:
	 * 4 = ceil(buffer len/mss). To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/* header-only linear area: may need to pull data from frags */
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
			unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword */
				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
					break;
				/* alignment OK: continue into the pull below */
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	/* 82544 in PCI-X may need an extra descriptor for the head */
	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			(len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	/* one extra per fragment for the PCI-X 82544 case above */
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	/* 82547 Tx FIFO workaround: defer the send and let the
	 * fifo_stall_task restart the queue later */
	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	/* remember the ring position so a failed map can be unwound */
	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);

	} else {
		/* mapping failed: drop the skb and roll the ring back */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
3279
Tushar Daveb04e36b2012-01-27 09:00:46 +00003280#define NUM_REGS 38 /* 1 based count */
3281static void e1000_regdump(struct e1000_adapter *adapter)
3282{
3283 struct e1000_hw *hw = &adapter->hw;
3284 u32 regs[NUM_REGS];
3285 u32 *regs_buff = regs;
3286 int i = 0;
3287
Tushar Davee29b5d82012-02-10 08:06:36 +00003288 static const char * const reg_name[] = {
3289 "CTRL", "STATUS",
3290 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3291 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3292 "TIDV", "TXDCTL", "TADV", "TARC0",
3293 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3294 "TXDCTL1", "TARC1",
3295 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3296 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3297 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
Tushar Daveb04e36b2012-01-27 09:00:46 +00003298 };
3299
3300 regs_buff[0] = er32(CTRL);
3301 regs_buff[1] = er32(STATUS);
3302
3303 regs_buff[2] = er32(RCTL);
3304 regs_buff[3] = er32(RDLEN);
3305 regs_buff[4] = er32(RDH);
3306 regs_buff[5] = er32(RDT);
3307 regs_buff[6] = er32(RDTR);
3308
3309 regs_buff[7] = er32(TCTL);
3310 regs_buff[8] = er32(TDBAL);
3311 regs_buff[9] = er32(TDBAH);
3312 regs_buff[10] = er32(TDLEN);
3313 regs_buff[11] = er32(TDH);
3314 regs_buff[12] = er32(TDT);
3315 regs_buff[13] = er32(TIDV);
3316 regs_buff[14] = er32(TXDCTL);
3317 regs_buff[15] = er32(TADV);
3318 regs_buff[16] = er32(TARC0);
3319
3320 regs_buff[17] = er32(TDBAL1);
3321 regs_buff[18] = er32(TDBAH1);
3322 regs_buff[19] = er32(TDLEN1);
3323 regs_buff[20] = er32(TDH1);
3324 regs_buff[21] = er32(TDT1);
3325 regs_buff[22] = er32(TXDCTL1);
3326 regs_buff[23] = er32(TARC1);
3327 regs_buff[24] = er32(CTRL_EXT);
3328 regs_buff[25] = er32(ERT);
3329 regs_buff[26] = er32(RDBAL0);
3330 regs_buff[27] = er32(RDBAH0);
3331 regs_buff[28] = er32(TDFH);
3332 regs_buff[29] = er32(TDFT);
3333 regs_buff[30] = er32(TDFHS);
3334 regs_buff[31] = er32(TDFTS);
3335 regs_buff[32] = er32(TDFPC);
3336 regs_buff[33] = er32(RDFH);
3337 regs_buff[34] = er32(RDFT);
3338 regs_buff[35] = er32(RDFHS);
3339 regs_buff[36] = er32(RDFTS);
3340 regs_buff[37] = er32(RDFPC);
3341
3342 pr_info("Register dump\n");
Tushar Davee29b5d82012-02-10 08:06:36 +00003343 for (i = 0; i < NUM_REGS; i++)
3344 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003345}
3346
3347/*
3348 * e1000_dump: Print registers, tx ring and rx ring
3349 */
3350static void e1000_dump(struct e1000_adapter *adapter)
3351{
3352 /* this code doesn't handle multiple rings */
3353 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3354 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3355 int i;
3356
3357 if (!netif_msg_hw(adapter))
3358 return;
3359
3360 /* Print Registers */
3361 e1000_regdump(adapter);
3362
3363 /*
3364 * transmit dump
3365 */
3366 pr_info("TX Desc ring0 dump\n");
3367
3368 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3369 *
3370 * Legacy Transmit Descriptor
3371 * +--------------------------------------------------------------+
3372 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3373 * +--------------------------------------------------------------+
3374 * 8 | Special | CSS | Status | CMD | CSO | Length |
3375 * +--------------------------------------------------------------+
3376 * 63 48 47 36 35 32 31 24 23 16 15 0
3377 *
3378 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3379 * 63 48 47 40 39 32 31 16 15 8 7 0
3380 * +----------------------------------------------------------------+
3381 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3382 * +----------------------------------------------------------------+
3383 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3384 * +----------------------------------------------------------------+
3385 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3386 *
3387 * Extended Data Descriptor (DTYP=0x1)
3388 * +----------------------------------------------------------------+
3389 * 0 | Buffer Address [63:0] |
3390 * +----------------------------------------------------------------+
3391 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3392 * +----------------------------------------------------------------+
3393 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3394 */
Tushar Davee29b5d82012-02-10 08:06:36 +00003395 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3396 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003397
3398 if (!netif_msg_tx_done(adapter))
3399 goto rx_ring_summary;
3400
3401 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3402 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3403 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
Andrei Emeltchenkodd7f5c92012-03-25 17:49:25 +00003404 struct my_u { __le64 a; __le64 b; };
Tushar Daveb04e36b2012-01-27 09:00:46 +00003405 struct my_u *u = (struct my_u *)tx_desc;
Tushar Davee29b5d82012-02-10 08:06:36 +00003406 const char *type;
3407
Tushar Daveb04e36b2012-01-27 09:00:46 +00003408 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003409 type = "NTC/U";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003410 else if (i == tx_ring->next_to_use)
Tushar Davee29b5d82012-02-10 08:06:36 +00003411 type = "NTU";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003412 else if (i == tx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003413 type = "NTC";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003414 else
Tushar Davee29b5d82012-02-10 08:06:36 +00003415 type = "";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003416
Tushar Davee29b5d82012-02-10 08:06:36 +00003417 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3418 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3419 le64_to_cpu(u->a), le64_to_cpu(u->b),
3420 (u64)buffer_info->dma, buffer_info->length,
3421 buffer_info->next_to_watch,
3422 (u64)buffer_info->time_stamp, buffer_info->skb, type);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003423 }
3424
3425rx_ring_summary:
3426 /*
3427 * receive dump
3428 */
3429 pr_info("\nRX Desc ring dump\n");
3430
3431 /* Legacy Receive Descriptor Format
3432 *
3433 * +-----------------------------------------------------+
3434 * | Buffer Address [63:0] |
3435 * +-----------------------------------------------------+
3436 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3437 * +-----------------------------------------------------+
3438 * 63 48 47 40 39 32 31 16 15 0
3439 */
Tushar Davee29b5d82012-02-10 08:06:36 +00003440 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003441
3442 if (!netif_msg_rx_status(adapter))
3443 goto exit;
3444
3445 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3446 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3447 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
Andrei Emeltchenkodd7f5c92012-03-25 17:49:25 +00003448 struct my_u { __le64 a; __le64 b; };
Tushar Daveb04e36b2012-01-27 09:00:46 +00003449 struct my_u *u = (struct my_u *)rx_desc;
Tushar Davee29b5d82012-02-10 08:06:36 +00003450 const char *type;
3451
Tushar Daveb04e36b2012-01-27 09:00:46 +00003452 if (i == rx_ring->next_to_use)
Tushar Davee29b5d82012-02-10 08:06:36 +00003453 type = "NTU";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003454 else if (i == rx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003455 type = "NTC";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003456 else
Tushar Davee29b5d82012-02-10 08:06:36 +00003457 type = "";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003458
Tushar Davee29b5d82012-02-10 08:06:36 +00003459 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3460 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3461 (u64)buffer_info->dma, buffer_info->skb, type);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003462 } /* for */
3463
3464 /* dump the descriptor caches */
3465 /* rx */
Tushar Davee29b5d82012-02-10 08:06:36 +00003466 pr_info("Rx descriptor cache in 64bit format\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003467 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
Tushar Davee29b5d82012-02-10 08:06:36 +00003468 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3469 i,
3470 readl(adapter->hw.hw_addr + i+4),
3471 readl(adapter->hw.hw_addr + i),
3472 readl(adapter->hw.hw_addr + i+12),
3473 readl(adapter->hw.hw_addr + i+8));
Tushar Daveb04e36b2012-01-27 09:00:46 +00003474 }
3475 /* tx */
Tushar Davee29b5d82012-02-10 08:06:36 +00003476 pr_info("Tx descriptor cache in 64bit format\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003477 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
Tushar Davee29b5d82012-02-10 08:06:36 +00003478 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3479 i,
3480 readl(adapter->hw.hw_addr + i+4),
3481 readl(adapter->hw.hw_addr + i),
3482 readl(adapter->hw.hw_addr + i+12),
3483 readl(adapter->hw.hw_addr + i+8));
Tushar Daveb04e36b2012-01-27 09:00:46 +00003484 }
3485exit:
3486 return;
3487}
3488
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489/**
3490 * e1000_tx_timeout - Respond to a Tx Hang
3491 * @netdev: network interface device structure
3492 **/
3493
Joe Perches64798842008-07-11 15:17:02 -07003494static void e1000_tx_timeout(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003496 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497
3498 /* Do the reset outside of interrupt context */
Jeff Kirsher87041632006-03-02 18:21:24 -08003499 adapter->tx_timeout_count++;
3500 schedule_work(&adapter->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501}
3502
Joe Perches64798842008-07-11 15:17:02 -07003503static void e1000_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504{
David Howells65f27f32006-11-22 14:55:48 +00003505 struct e1000_adapter *adapter =
3506 container_of(work, struct e1000_adapter, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00003508 if (test_bit(__E1000_DOWN, &adapter->flags))
3509 return;
Tushar Daveb04e36b2012-01-27 09:00:46 +00003510 e_err(drv, "Reset adapter\n");
Jesse Brandeburg338c15e2010-09-22 18:22:42 +00003511 e1000_reinit_safe(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512}
3513
3514/**
3515 * e1000_get_stats - Get System Network Statistics
3516 * @netdev: network interface device structure
3517 *
3518 * Returns the address of the device statistics structure.
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003519 * The statistics are actually updated from the watchdog.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520 **/
3521
Joe Perches64798842008-07-11 15:17:02 -07003522static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523{
Jeff Kirsher6b7660c2006-01-12 16:50:35 -08003524 /* only return the current stats */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003525 return &netdev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526}
3527
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Validates the requested size against global and per-MAC limits,
 * serializes against concurrent resets, takes the interface down if it
 * is running, picks a new Rx buffer size for the resulting max frame,
 * and brings the interface back up (or just resets it).
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		/* these early MACs cannot do jumbo frames at all */
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* spin until we own the reset flag; serializes with other resets */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
3603
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 *
 * Accumulates the hardware statistics registers (which clear on read)
 * into adapter->stats under stats_lock, then derives the OS-visible
 * net_device_stats error counters from them.
 **/

void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	/* Rx good/error/flow-control counters */
	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	/* Rx packet size histogram */
	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	/* Tx packet size histogram */
	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	/* these registers only exist on 82543 and newer MACs */
	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	/* some parts report bogus carrier stats at full duplex; zero them */
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		   (hw->phy_type == e1000_phy_m88) &&
		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
Jesse Brandeburg9ac98282006-11-01 08:48:10 -08003758
3759/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760 * e1000_intr - Interrupt Handler
3761 * @irq: interrupt number
3762 * @data: pointer to a network interface device structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763 **/
3764
Joe Perches64798842008-07-11 15:17:02 -07003765static irqreturn_t e1000_intr(int irq, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766{
3767 struct net_device *netdev = data;
Malli Chilakala60490fe2005-06-17 17:41:45 -07003768 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 struct e1000_hw *hw = &adapter->hw;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003770 u32 icr = er32(ICR);
Francois Romieuc3570ac2008-07-11 15:17:38 -07003771
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003772 if (unlikely((!icr)))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003773 return IRQ_NONE; /* Not our interrupt */
3774
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003775 /*
3776 * we might have caused the interrupt, but the above
3777 * read cleared it, and just in case the driver is
3778 * down there is nothing to do so return handled
3779 */
3780 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3781 return IRQ_HANDLED;
3782
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003783 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 hw->get_link_status = 1;
Auke Kok1314bbf2006-09-27 12:54:02 -07003785 /* guard against interrupt when we're going down */
3786 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003787 schedule_delayed_work(&adapter->watchdog_task, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788 }
3789
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003790 /* disable interrupts, without the synchronize_irq bit */
3791 ew32(IMC, ~0);
3792 E1000_WRITE_FLUSH();
3793
Ben Hutchings288379f2009-01-19 16:43:59 -08003794 if (likely(napi_schedule_prep(&adapter->napi))) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003795 adapter->total_tx_bytes = 0;
3796 adapter->total_tx_packets = 0;
3797 adapter->total_rx_bytes = 0;
3798 adapter->total_rx_packets = 0;
Ben Hutchings288379f2009-01-19 16:43:59 -08003799 __napi_schedule(&adapter->napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003800 } else {
Auke Kok90fb5132006-11-01 08:47:30 -08003801 /* this really should not happen! if it does it is basically a
3802 * bug, but not a hard error, so enable ints and continue */
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003803 if (!test_bit(__E1000_DOWN, &adapter->flags))
3804 e1000_irq_enable(adapter);
3805 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807 return IRQ_HANDLED;
3808}
3809
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810/**
3811 * e1000_clean - NAPI Rx polling callback
3812 * @adapter: board private structure
3813 **/
Joe Perches64798842008-07-11 15:17:02 -07003814static int e1000_clean(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003816 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003817 int tx_clean_complete = 0, work_done = 0;
Malli Chilakala26483452005-04-28 19:44:46 -07003818
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003819 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003820
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003821 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003822
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003823 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003824 work_done = budget;
3825
David S. Miller53e52c72008-01-07 21:06:12 -08003826 /* If budget not fully consumed, exit the polling mode */
3827 if (work_done < budget) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003828 if (likely(adapter->itr_setting & 3))
3829 e1000_set_itr(adapter);
Ben Hutchings288379f2009-01-19 16:43:59 -08003830 napi_complete(napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003831 if (!test_bit(__E1000_DOWN, &adapter->flags))
3832 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833 }
3834
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003835 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836}
3837
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838/**
3839 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3840 * @adapter: board private structure
3841 **/
Joe Perches64798842008-07-11 15:17:02 -07003842static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3843 struct e1000_tx_ring *tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844{
Joe Perches1dc32912008-07-11 15:17:08 -07003845 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 struct net_device *netdev = adapter->netdev;
3847 struct e1000_tx_desc *tx_desc, *eop_desc;
3848 struct e1000_buffer *buffer_info;
3849 unsigned int i, eop;
Jeff Kirsher2a1af5d2006-03-02 18:20:43 -08003850 unsigned int count = 0;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003851 unsigned int total_tx_bytes=0, total_tx_packets=0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852
3853 i = tx_ring->next_to_clean;
3854 eop = tx_ring->buffer_info[i].next_to_watch;
3855 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3856
Alexander Duyckccfb3422009-03-25 21:59:04 +00003857 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3858 (count < tx_ring->count)) {
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003859 bool cleaned = false;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00003860 rmb(); /* read buffer_info after eop_desc */
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003861 for ( ; !cleaned; count++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862 tx_desc = E1000_TX_DESC(*tx_ring, i);
3863 buffer_info = &tx_ring->buffer_info[i];
3864 cleaned = (i == eop);
3865
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003866 if (cleaned) {
Dean Nelson31c15a22011-08-25 14:39:24 +00003867 total_tx_packets += buffer_info->segs;
3868 total_tx_bytes += buffer_info->bytecount;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003869 }
Jeff Kirsherfd803242005-12-13 00:06:22 -05003870 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08003871 tx_desc->upper.data = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003873 if (unlikely(++i == tx_ring->count)) i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003875
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876 eop = tx_ring->buffer_info[i].next_to_watch;
3877 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3878 }
3879
3880 tx_ring->next_to_clean = i;
3881
Auke Kok77b2aad2006-04-14 19:05:25 -07003882#define TX_WAKE_THRESHOLD 32
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003883 if (unlikely(count && netif_carrier_ok(netdev) &&
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003884 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3885 /* Make sure that anybody stopping the queue after this
3886 * sees the new next_to_clean.
3887 */
3888 smp_mb();
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003889
3890 if (netif_queue_stopped(netdev) &&
3891 !(test_bit(__E1000_DOWN, &adapter->flags))) {
Auke Kok77b2aad2006-04-14 19:05:25 -07003892 netif_wake_queue(netdev);
Jesse Brandeburgfcfb1222006-11-01 08:47:59 -08003893 ++adapter->restart_queue;
3894 }
Auke Kok77b2aad2006-04-14 19:05:25 -07003895 }
Malli Chilakala26483452005-04-28 19:44:46 -07003896
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003897 if (adapter->detect_tx_hung) {
Malli Chilakala26483452005-04-28 19:44:46 -07003898 /* Detect a transmit hang in hardware, this serializes the
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 * check with the clearing of time_stamp and movement of i */
Joe Perchesc3033b02008-03-21 11:06:25 -07003900 adapter->detect_tx_hung = false;
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003901 if (tx_ring->buffer_info[eop].time_stamp &&
3902 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00003903 (adapter->tx_timeout_factor * HZ)) &&
3904 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003905
3906 /* detected Tx unit hang */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003907 e_err(drv, "Detected Tx Unit Hang\n"
Emil Tantilov675ad472010-04-27 14:02:58 +00003908 " Tx Queue <%lu>\n"
3909 " TDH <%x>\n"
3910 " TDT <%x>\n"
3911 " next_to_use <%x>\n"
3912 " next_to_clean <%x>\n"
3913 "buffer_info[next_to_clean]\n"
3914 " time_stamp <%lx>\n"
3915 " next_to_watch <%x>\n"
3916 " jiffies <%lx>\n"
3917 " next_to_watch.status <%x>\n",
Jeff Kirsher7bfa4812006-01-12 16:50:41 -08003918 (unsigned long)((tx_ring - adapter->tx_ring) /
3919 sizeof(struct e1000_tx_ring)),
Joe Perches1dc32912008-07-11 15:17:08 -07003920 readl(hw->hw_addr + tx_ring->tdh),
3921 readl(hw->hw_addr + tx_ring->tdt),
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003922 tx_ring->next_to_use,
Jeff Kirsher392137f2006-01-12 16:50:57 -08003923 tx_ring->next_to_clean,
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003924 tx_ring->buffer_info[eop].time_stamp,
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003925 eop,
3926 jiffies,
3927 eop_desc->upper.fields.status);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003928 e1000_dump(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003929 netif_stop_queue(netdev);
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003930 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003932 adapter->total_tx_bytes += total_tx_bytes;
3933 adapter->total_tx_packets += total_tx_packets;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003934 netdev->stats.tx_bytes += total_tx_bytes;
3935 netdev->stats.tx_packets += total_tx_packets;
Eric Dumazet807540b2010-09-23 05:40:09 +00003936 return count < tx_ring->count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937}
3938
3939/**
3940 * e1000_rx_checksum - Receive Checksum Offload for 82543
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003941 * @adapter: board private structure
3942 * @status_err: receive descriptor status and error fields
3943 * @csum: receive descriptor csum field
3944 * @sk_buff: socket buffer with received data
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 **/
3946
Joe Perches64798842008-07-11 15:17:02 -07003947static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3948 u32 csum, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949{
Joe Perches1dc32912008-07-11 15:17:08 -07003950 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07003951 u16 status = (u16)status_err;
3952 u8 errors = (u8)(status_err >> 24);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003953
3954 skb_checksum_none_assert(skb);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003955
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 /* 82543 or newer only */
Joe Perches1dc32912008-07-11 15:17:08 -07003957 if (unlikely(hw->mac_type < e1000_82543)) return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 /* Ignore Checksum bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003959 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003960 /* TCP/UDP checksum error bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003961 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003962 /* let the stack verify checksum errors */
3963 adapter->hw_csum_err++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964 return;
3965 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003966 /* TCP/UDP Checksum has not been calculated */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003967 if (!(status & E1000_RXD_STAT_TCPCS))
3968 return;
3969
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003970 /* It must be a TCP or UDP packet with a valid checksum */
3971 if (likely(status & E1000_RXD_STAT_TCPCS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972 /* TCP checksum is good */
3973 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003975 adapter->hw_csum_good++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976}
3977
3978/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003979 * e1000_consume_page - helper function
3980 **/
3981static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3982 u16 length)
3983{
3984 bi->page = NULL;
3985 skb->len += length;
3986 skb->data_len += length;
Eric Dumazeted64b3c2011-10-13 07:53:42 +00003987 skb->truesize += PAGE_SIZE;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003988}
3989
3990/**
3991 * e1000_receive_skb - helper function to handle rx indications
3992 * @adapter: board private structure
3993 * @status: descriptor status field as written by hardware
3994 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3995 * @skb: pointer to sk_buff to be indicated to stack
3996 */
3997static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3998 __le16 vlan, struct sk_buff *skb)
3999{
Jesse Brandeburg6a08d192010-09-22 18:23:05 +00004000 skb->protocol = eth_type_trans(skb, adapter->netdev);
4001
Jiri Pirko5622e402011-07-21 03:26:31 +00004002 if (status & E1000_RXD_STAT_VP) {
4003 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4004
4005 __vlan_hwaccel_put_tag(skb, vid);
4006 }
4007 napi_gro_receive(&adapter->napi, skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004008}
4009
4010/**
4011 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4012 * @adapter: board private structure
4013 * @rx_ring: ring to clean
4014 * @work_done: amount of napi work completed this call
4015 * @work_to_do: max amount of work allowed for this call to do
4016 *
4017 * the return value indicates whether actual cleaning was done, there
4018 * is no guarantee that everything was cleaned
4019 */
4020static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4021 struct e1000_rx_ring *rx_ring,
4022 int *work_done, int work_to_do)
4023{
4024 struct e1000_hw *hw = &adapter->hw;
4025 struct net_device *netdev = adapter->netdev;
4026 struct pci_dev *pdev = adapter->pdev;
4027 struct e1000_rx_desc *rx_desc, *next_rxd;
4028 struct e1000_buffer *buffer_info, *next_buffer;
4029 unsigned long irq_flags;
4030 u32 length;
4031 unsigned int i;
4032 int cleaned_count = 0;
4033 bool cleaned = false;
4034 unsigned int total_rx_bytes=0, total_rx_packets=0;
4035
4036 i = rx_ring->next_to_clean;
4037 rx_desc = E1000_RX_DESC(*rx_ring, i);
4038 buffer_info = &rx_ring->buffer_info[i];
4039
4040 while (rx_desc->status & E1000_RXD_STAT_DD) {
4041 struct sk_buff *skb;
4042 u8 status;
4043
4044 if (*work_done >= work_to_do)
4045 break;
4046 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00004047 rmb(); /* read descriptor and rx_buffer_info after status DD */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004048
4049 status = rx_desc->status;
4050 skb = buffer_info->skb;
4051 buffer_info->skb = NULL;
4052
4053 if (++i == rx_ring->count) i = 0;
4054 next_rxd = E1000_RX_DESC(*rx_ring, i);
4055 prefetch(next_rxd);
4056
4057 next_buffer = &rx_ring->buffer_info[i];
4058
4059 cleaned = true;
4060 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004061 dma_unmap_page(&pdev->dev, buffer_info->dma,
4062 buffer_info->length, DMA_FROM_DEVICE);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004063 buffer_info->dma = 0;
4064
4065 length = le16_to_cpu(rx_desc->length);
4066
4067 /* errors is only valid for DD + EOP descriptors */
4068 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4069 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
Sebastian Andrzej Siewiora3060852012-05-11 16:30:46 +00004070 u8 *mapped;
4071 u8 last_byte;
4072
4073 mapped = page_address(buffer_info->page);
4074 last_byte = *(mapped + length - 1);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004075 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4076 last_byte)) {
4077 spin_lock_irqsave(&adapter->stats_lock,
4078 irq_flags);
4079 e1000_tbi_adjust_stats(hw, &adapter->stats,
Sebastian Andrzej Siewior281a8f22012-05-15 09:18:55 +00004080 length, mapped);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004081 spin_unlock_irqrestore(&adapter->stats_lock,
4082 irq_flags);
4083 length--;
4084 } else {
Ben Greeare825b732012-04-04 06:01:29 +00004085 if (netdev->features & NETIF_F_RXALL)
4086 goto process_skb;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004087 /* recycle both page and skb */
4088 buffer_info->skb = skb;
4089 /* an error means any chain goes out the window
4090 * too */
4091 if (rx_ring->rx_skb_top)
4092 dev_kfree_skb(rx_ring->rx_skb_top);
4093 rx_ring->rx_skb_top = NULL;
4094 goto next_desc;
4095 }
4096 }
4097
4098#define rxtop rx_ring->rx_skb_top
Ben Greeare825b732012-04-04 06:01:29 +00004099process_skb:
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004100 if (!(status & E1000_RXD_STAT_EOP)) {
4101 /* this descriptor is only the beginning (or middle) */
4102 if (!rxtop) {
4103 /* this is the beginning of a chain */
4104 rxtop = skb;
4105 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4106 0, length);
4107 } else {
4108 /* this is the middle of a chain */
4109 skb_fill_page_desc(rxtop,
4110 skb_shinfo(rxtop)->nr_frags,
4111 buffer_info->page, 0, length);
4112 /* re-use the skb, only consumed the page */
4113 buffer_info->skb = skb;
4114 }
4115 e1000_consume_page(buffer_info, rxtop, length);
4116 goto next_desc;
4117 } else {
4118 if (rxtop) {
4119 /* end of the chain */
4120 skb_fill_page_desc(rxtop,
4121 skb_shinfo(rxtop)->nr_frags,
4122 buffer_info->page, 0, length);
4123 /* re-use the current skb, we only consumed the
4124 * page */
4125 buffer_info->skb = skb;
4126 skb = rxtop;
4127 rxtop = NULL;
4128 e1000_consume_page(buffer_info, skb, length);
4129 } else {
4130 /* no chain, got EOP, this buf is the packet
4131 * copybreak to save the put_page/alloc_page */
4132 if (length <= copybreak &&
4133 skb_tailroom(skb) >= length) {
4134 u8 *vaddr;
Cong Wang46790262011-11-25 23:14:23 +08004135 vaddr = kmap_atomic(buffer_info->page);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004136 memcpy(skb_tail_pointer(skb), vaddr, length);
Cong Wang46790262011-11-25 23:14:23 +08004137 kunmap_atomic(vaddr);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004138 /* re-use the page, so don't erase
4139 * buffer_info->page */
4140 skb_put(skb, length);
4141 } else {
4142 skb_fill_page_desc(skb, 0,
4143 buffer_info->page, 0,
4144 length);
4145 e1000_consume_page(buffer_info, skb,
4146 length);
4147 }
4148 }
4149 }
4150
4151 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4152 e1000_rx_checksum(adapter,
4153 (u32)(status) |
4154 ((u32)(rx_desc->errors) << 24),
4155 le16_to_cpu(rx_desc->csum), skb);
4156
Ben Greearb0d15622012-02-11 15:40:11 +00004157 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4158 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4159 pskb_trim(skb, skb->len - 4);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004160 total_rx_packets++;
4161
4162 /* eth type trans needs skb->data to point to something */
4163 if (!pskb_may_pull(skb, ETH_HLEN)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004164 e_err(drv, "pskb_may_pull failed.\n");
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004165 dev_kfree_skb(skb);
4166 goto next_desc;
4167 }
4168
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004169 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4170
4171next_desc:
4172 rx_desc->status = 0;
4173
4174 /* return some buffers to hardware, one at a time is too slow */
4175 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4176 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4177 cleaned_count = 0;
4178 }
4179
4180 /* use prefetched values */
4181 rx_desc = next_rxd;
4182 buffer_info = next_buffer;
4183 }
4184 rx_ring->next_to_clean = i;
4185
4186 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4187 if (cleaned_count)
4188 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4189
4190 adapter->total_rx_packets += total_rx_packets;
4191 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004192 netdev->stats.rx_bytes += total_rx_bytes;
4193 netdev->stats.rx_packets += total_rx_packets;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004194 return cleaned;
4195}
4196
Joe Perches57bf6ee2010-05-13 15:26:17 +00004197/*
4198 * this should improve performance for small packets with large amounts
4199 * of reassembly being done in the stack
4200 */
4201static void e1000_check_copybreak(struct net_device *netdev,
4202 struct e1000_buffer *buffer_info,
4203 u32 length, struct sk_buff **skb)
4204{
4205 struct sk_buff *new_skb;
4206
4207 if (length > copybreak)
4208 return;
4209
4210 new_skb = netdev_alloc_skb_ip_align(netdev, length);
4211 if (!new_skb)
4212 return;
4213
4214 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4215 (*skb)->data - NET_IP_ALIGN,
4216 length + NET_IP_ALIGN);
4217 /* save the skb in buffer_info as good */
4218 buffer_info->skb = *skb;
4219 *skb = new_skb;
4220}
4221
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004222/**
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004223 * e1000_clean_rx_irq - Send received data up the network stack; legacy
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 * @adapter: board private structure
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004225 * @rx_ring: ring to clean
4226 * @work_done: amount of napi work completed this call
4227 * @work_to_do: max amount of work allowed for this call to do
4228 */
Joe Perches64798842008-07-11 15:17:02 -07004229static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4230 struct e1000_rx_ring *rx_ring,
4231 int *work_done, int work_to_do)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232{
Joe Perches1dc32912008-07-11 15:17:08 -07004233 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 struct net_device *netdev = adapter->netdev;
4235 struct pci_dev *pdev = adapter->pdev;
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004236 struct e1000_rx_desc *rx_desc, *next_rxd;
4237 struct e1000_buffer *buffer_info, *next_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238 unsigned long flags;
Joe Perches406874a2008-04-03 10:06:32 -07004239 u32 length;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240 unsigned int i;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004241 int cleaned_count = 0;
Joe Perchesc3033b02008-03-21 11:06:25 -07004242 bool cleaned = false;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004243 unsigned int total_rx_bytes=0, total_rx_packets=0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244
4245 i = rx_ring->next_to_clean;
4246 rx_desc = E1000_RX_DESC(*rx_ring, i);
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004247 buffer_info = &rx_ring->buffer_info[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004249 while (rx_desc->status & E1000_RXD_STAT_DD) {
Auke Kok24f476e2006-06-08 09:28:47 -07004250 struct sk_buff *skb;
Jeff Kirshera292ca62006-01-12 16:51:30 -08004251 u8 status;
Auke Kok90fb5132006-11-01 08:47:30 -08004252
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004253 if (*work_done >= work_to_do)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 break;
4255 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00004256 rmb(); /* read descriptor and rx_buffer_info after status DD */
Francois Romieuc3570ac2008-07-11 15:17:38 -07004257
Jeff Kirshera292ca62006-01-12 16:51:30 -08004258 status = rx_desc->status;
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004259 skb = buffer_info->skb;
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004260 buffer_info->skb = NULL;
4261
Jeff Kirsher30320be2006-03-02 18:21:57 -08004262 prefetch(skb->data - NET_IP_ALIGN);
4263
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004264 if (++i == rx_ring->count) i = 0;
4265 next_rxd = E1000_RX_DESC(*rx_ring, i);
Jeff Kirsher30320be2006-03-02 18:21:57 -08004266 prefetch(next_rxd);
4267
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004268 next_buffer = &rx_ring->buffer_info[i];
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004269
Joe Perchesc3033b02008-03-21 11:06:25 -07004270 cleaned = true;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004271 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004272 dma_unmap_single(&pdev->dev, buffer_info->dma,
4273 buffer_info->length, DMA_FROM_DEVICE);
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00004274 buffer_info->dma = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276 length = le16_to_cpu(rx_desc->length);
Neil Hormanea30e112009-06-02 01:29:58 -07004277 /* !EOP means multiple descriptors were used to store a single
Jesse Brandeburg40a14de2010-01-19 14:15:38 +00004278 * packet, if thats the case we need to toss it. In fact, we
4279 * to toss every packet with the EOP bit clear and the next
4280 * frame that _does_ have the EOP bit set, as it is by
4281 * definition only a frame fragment
4282 */
4283 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4284 adapter->discarding = true;
4285
4286 if (adapter->discarding) {
Jeff Kirshera1415ee2006-02-28 20:24:07 -08004287 /* All receives must fit into a single buffer */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004288 e_dbg("Receive packet consumed multiple buffers\n");
Auke Kok864c4e42006-06-27 09:06:53 -07004289 /* recycle */
Auke Kok8fc897b2006-08-28 14:56:16 -07004290 buffer_info->skb = skb;
Jesse Brandeburg40a14de2010-01-19 14:15:38 +00004291 if (status & E1000_RXD_STAT_EOP)
4292 adapter->discarding = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293 goto next_desc;
4294 }
4295
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004296 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004297 u8 last_byte = *(skb->data + length - 1);
Joe Perches1dc32912008-07-11 15:17:08 -07004298 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4299 last_byte)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 spin_lock_irqsave(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004301 e1000_tbi_adjust_stats(hw, &adapter->stats,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 length, skb->data);
4303 spin_unlock_irqrestore(&adapter->stats_lock,
4304 flags);
4305 length--;
4306 } else {
Ben Greeare825b732012-04-04 06:01:29 +00004307 if (netdev->features & NETIF_F_RXALL)
4308 goto process_skb;
Auke Kok9e2feac2006-04-14 19:05:18 -07004309 /* recycle */
4310 buffer_info->skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 goto next_desc;
4312 }
Auke Kok1cb58212006-04-18 12:31:04 -07004313 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314
Ben Greeare825b732012-04-04 06:01:29 +00004315process_skb:
Ben Greearb0d15622012-02-11 15:40:11 +00004316 total_rx_bytes += (length - 4); /* don't count FCS */
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004317 total_rx_packets++;
4318
Ben Greearb0d15622012-02-11 15:40:11 +00004319 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4320 /* adjust length to remove Ethernet CRC, this must be
4321 * done after the TBI_ACCEPT workaround above
4322 */
4323 length -= 4;
4324
Joe Perches57bf6ee2010-05-13 15:26:17 +00004325 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4326
Auke Kok996695d2006-11-01 08:47:50 -08004327 skb_put(skb, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328
4329 /* Receive Checksum Offload */
Jeff Kirshera292ca62006-01-12 16:51:30 -08004330 e1000_rx_checksum(adapter,
Joe Perches406874a2008-04-03 10:06:32 -07004331 (u32)(status) |
4332 ((u32)(rx_desc->errors) << 24),
David S. Millerc3d7a3a2006-03-15 14:26:28 -08004333 le16_to_cpu(rx_desc->csum), skb);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004334
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004335 e1000_receive_skb(adapter, status, rx_desc->special, skb);
Francois Romieuc3570ac2008-07-11 15:17:38 -07004336
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337next_desc:
4338 rx_desc->status = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004340 /* return some buffers to hardware, one at a time is too slow */
4341 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4342 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4343 cleaned_count = 0;
4344 }
4345
Jeff Kirsher30320be2006-03-02 18:21:57 -08004346 /* use prefetched values */
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004347 rx_desc = next_rxd;
4348 buffer_info = next_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 rx_ring->next_to_clean = i;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004351
4352 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4353 if (cleaned_count)
4354 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004356 adapter->total_rx_packets += total_rx_packets;
4357 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004358 netdev->stats.rx_bytes += total_rx_bytes;
4359 netdev->stats.rx_packets += total_rx_packets;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360 return cleaned;
4361}
4362
4363/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004364 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4365 * @adapter: address of board private structure
4366 * @rx_ring: pointer to receive ring structure
4367 * @cleaned_count: number of buffers to allocate this pass
4368 **/
4369
4370static void
4371e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4372 struct e1000_rx_ring *rx_ring, int cleaned_count)
4373{
4374 struct net_device *netdev = adapter->netdev;
4375 struct pci_dev *pdev = adapter->pdev;
4376 struct e1000_rx_desc *rx_desc;
4377 struct e1000_buffer *buffer_info;
4378 struct sk_buff *skb;
4379 unsigned int i;
Eric Dumazet89d71a62009-10-13 05:34:20 +00004380 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004381
4382 i = rx_ring->next_to_use;
4383 buffer_info = &rx_ring->buffer_info[i];
4384
4385 while (cleaned_count--) {
4386 skb = buffer_info->skb;
4387 if (skb) {
4388 skb_trim(skb, 0);
4389 goto check_page;
4390 }
4391
Eric Dumazet89d71a62009-10-13 05:34:20 +00004392 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004393 if (unlikely(!skb)) {
4394 /* Better luck next round */
4395 adapter->alloc_rx_buff_failed++;
4396 break;
4397 }
4398
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004399 buffer_info->skb = skb;
4400 buffer_info->length = adapter->rx_buffer_len;
4401check_page:
4402 /* allocate a new page if necessary */
4403 if (!buffer_info->page) {
4404 buffer_info->page = alloc_page(GFP_ATOMIC);
4405 if (unlikely(!buffer_info->page)) {
4406 adapter->alloc_rx_buff_failed++;
4407 break;
4408 }
4409 }
4410
Anton Blanchardb5abb022010-02-19 17:54:53 +00004411 if (!buffer_info->dma) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004412 buffer_info->dma = dma_map_page(&pdev->dev,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004413 buffer_info->page, 0,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004414 buffer_info->length,
4415 DMA_FROM_DEVICE);
4416 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
Anton Blanchardb5abb022010-02-19 17:54:53 +00004417 put_page(buffer_info->page);
4418 dev_kfree_skb(skb);
4419 buffer_info->page = NULL;
4420 buffer_info->skb = NULL;
4421 buffer_info->dma = 0;
4422 adapter->alloc_rx_buff_failed++;
4423 break; /* while !buffer_info->skb */
4424 }
4425 }
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004426
4427 rx_desc = E1000_RX_DESC(*rx_ring, i);
4428 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4429
4430 if (unlikely(++i == rx_ring->count))
4431 i = 0;
4432 buffer_info = &rx_ring->buffer_info[i];
4433 }
4434
4435 if (likely(rx_ring->next_to_use != i)) {
4436 rx_ring->next_to_use = i;
4437 if (unlikely(i-- == 0))
4438 i = (rx_ring->count - 1);
4439
4440 /* Force memory writes to complete before letting h/w
4441 * know there are new descriptors to fetch. (Only
4442 * applicable for weak-ordered memory model archs,
4443 * such as IA-64). */
4444 wmb();
4445 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4446 }
4447}
4448
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: receive ring to refill
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse an skb recycled by the cleanup path when present */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/*
		 * XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			/* unmap before dropping the slot so the mapping
			 * is not leaked */
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail must reference the last valid descriptor, one
		 * before next_to_use (with wrap-around) */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
4568
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 *
 * Only acts on IGP PHYs autonegotiating with 1000/Full advertised.
 * Uses adapter->smartspeed as an iteration counter: first clears the
 * Master/Slave enable bit after two back-to-back config faults, then at
 * E1000_SMARTSPEED_DOWNSHIFT re-enables it, restarting autoneg each time.
 **/

static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* drop manual Master/Slave config and restart
			 * autonegotiation */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			   !e1000_read_phy_reg(hw, PHY_CTRL,
				   	       &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4623
4624/**
4625 * e1000_ioctl -
4626 * @netdev:
4627 * @ifreq:
4628 * @cmd:
4629 **/
4630
Joe Perches64798842008-07-11 15:17:02 -07004631static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632{
4633 switch (cmd) {
4634 case SIOCGMIIPHY:
4635 case SIOCGMIIREG:
4636 case SIOCSMIIREG:
4637 return e1000_mii_ioctl(netdev, ifr, cmd);
4638 default:
4639 return -EOPNOTSUPP;
4640 }
4641}
4642
/**
 * e1000_mii_ioctl - MII register access (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
 * @netdev: network interface device structure
 * @ifr: ioctl request holding the mii_ioctl_data payload
 * @cmd: ioctl command number
 *
 * Copper media only. PHY accesses are serialized with stats_lock.
 * Writes to PHY_CTRL additionally resync the driver's speed/duplex/
 * autoneg state and reset/reinit the adapter to match.
 **/

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		/* PHY reads race with the stats/watchdog path; hold
		 * stats_lock around the access */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		/* only the 32 standard MII registers are writable here */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* advertise all 10/100/1000
					 * speed/duplex combinations */
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					/* decode forced speed from the
					 * BMCR speed-select bits */
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				/* spec-control changes require a PHY reset
				 * to take effect */
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
4740
Joe Perches64798842008-07-11 15:17:02 -07004741void e1000_pci_set_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742{
4743 struct e1000_adapter *adapter = hw->back;
Malli Chilakala26483452005-04-28 19:44:46 -07004744 int ret_val = pci_set_mwi(adapter->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004746 if (ret_val)
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004747 e_err(probe, "Error in setting MWI\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748}
4749
Joe Perches64798842008-07-11 15:17:02 -07004750void e1000_pci_clear_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751{
4752 struct e1000_adapter *adapter = hw->back;
4753
4754 pci_clear_mwi(adapter->pdev);
4755}
4756
Joe Perches64798842008-07-11 15:17:02 -07004757int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
Peter Oruba007755e2007-09-28 22:42:06 -07004758{
4759 struct e1000_adapter *adapter = hw->back;
4760 return pcix_get_mmrbc(adapter->pdev);
4761}
4762
Joe Perches64798842008-07-11 15:17:02 -07004763void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
Peter Oruba007755e2007-09-28 22:42:06 -07004764{
4765 struct e1000_adapter *adapter = hw->back;
4766 pcix_set_mmrbc(adapter->pdev, mmrbc);
4767}
4768
Joe Perches64798842008-07-11 15:17:02 -07004769void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770{
4771 outl(value, port);
4772}
4773
Jiri Pirko5622e402011-07-21 03:26:31 +00004774static bool e1000_vlan_used(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775{
Jiri Pirko5622e402011-07-21 03:26:31 +00004776 u16 vid;
4777
4778 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4779 return true;
4780 return false;
4781}
4782
Jiri Pirko52f55092012-03-20 18:10:01 +00004783static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4784 netdev_features_t features)
4785{
4786 struct e1000_hw *hw = &adapter->hw;
4787 u32 ctrl;
4788
4789 ctrl = er32(CTRL);
4790 if (features & NETIF_F_HW_VLAN_RX) {
4791 /* enable VLAN tag insert/strip */
4792 ctrl |= E1000_CTRL_VME;
4793 } else {
4794 /* disable VLAN tag insert/strip */
4795 ctrl &= ~E1000_CTRL_VME;
4796 }
4797 ew32(CTRL, ctrl);
4798}
/* Turn hardware VLAN receive filtering on or off, refreshing the
 * tag-strip mode (CTRL.VME) from the current netdev features first.
 * Interrupts are quiesced around the register updates unless the
 * adapter is already down. */
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		/* promiscuous mode must see all VLANs, so only set VFE
		 * when not promiscuous */
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4827
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004828static void e1000_vlan_mode(struct net_device *netdev,
Jiri Pirko52f55092012-03-20 18:10:01 +00004829 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +00004830{
4831 struct e1000_adapter *adapter = netdev_priv(netdev);
Jiri Pirko5622e402011-07-21 03:26:31 +00004832
4833 if (!test_bit(__E1000_DOWN, &adapter->flags))
4834 e1000_irq_disable(adapter);
4835
Jiri Pirko52f55092012-03-20 18:10:01 +00004836 __e1000_vlan_mode(adapter, features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004837
4838 if (!test_bit(__E1000_DOWN, &adapter->flags))
4839 e1000_irq_enable(adapter);
4840}
4841
/* Register VLAN @vid: turn on filtering on the first VLAN, set the
 * VID's bit in the hardware VLAN Filter Table Array (VFTA), and record
 * it in active_vlans. The management VLAN is skipped when firmware
 * manageability claims it. Always returns 0. */
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	/* VFTA is 128 x 32-bit: high 7 bits select the register,
	 * low 5 bits select the bit within it */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
4866
/* Unregister VLAN @vid: clear its bit in the hardware VFTA and in
 * active_vlans, and turn filtering off once no VLANs remain.
 * Always returns 0. */
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* NOTE(review): this disable/enable pair is back-to-back and so
	 * protects nothing; it looks like a leftover from when a vlgrp
	 * assignment sat between the two calls — candidate for removal,
	 * confirm against driver history before touching */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
4891
Joe Perches64798842008-07-11 15:17:02 -07004892static void e1000_restore_vlan(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893{
Jiri Pirko5622e402011-07-21 03:26:31 +00004894 u16 vid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895
Jiri Pirko5622e402011-07-21 03:26:31 +00004896 if (!e1000_vlan_used(adapter))
4897 return;
4898
4899 e1000_vlan_filter_on_off(adapter, true);
4900 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4901 e1000_vlan_rx_add_vid(adapter->netdev, vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902}
4903
David Decotigny14ad2512011-04-27 18:32:43 +00004904int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905{
Joe Perches1dc32912008-07-11 15:17:08 -07004906 struct e1000_hw *hw = &adapter->hw;
4907
4908 hw->autoneg = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004909
David Decotigny14ad2512011-04-27 18:32:43 +00004910 /* Make sure dplx is at most 1 bit and lsb of speed is not set
4911 * for the switch() below to work */
4912 if ((spd & 1) || (dplx & ~1))
4913 goto err_inval;
4914
Malli Chilakala69213682005-06-17 17:44:20 -07004915 /* Fiber NICs only allow 1000 gbps Full duplex */
Joe Perches1dc32912008-07-11 15:17:08 -07004916 if ((hw->media_type == e1000_media_type_fiber) &&
David Decotigny14ad2512011-04-27 18:32:43 +00004917 spd != SPEED_1000 &&
4918 dplx != DUPLEX_FULL)
4919 goto err_inval;
Malli Chilakala69213682005-06-17 17:44:20 -07004920
David Decotigny14ad2512011-04-27 18:32:43 +00004921 switch (spd + dplx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004922 case SPEED_10 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004923 hw->forced_speed_duplex = e1000_10_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 break;
4925 case SPEED_10 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004926 hw->forced_speed_duplex = e1000_10_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 break;
4928 case SPEED_100 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004929 hw->forced_speed_duplex = e1000_100_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930 break;
4931 case SPEED_100 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004932 hw->forced_speed_duplex = e1000_100_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 break;
4934 case SPEED_1000 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004935 hw->autoneg = 1;
4936 hw->autoneg_advertised = ADVERTISE_1000_FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004937 break;
4938 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4939 default:
David Decotigny14ad2512011-04-27 18:32:43 +00004940 goto err_inval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941 }
4942 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00004943
4944err_inval:
4945 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4946 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947}
4948
/**
 * __e1000_shutdown - common suspend/poweroff path
 * @pdev: PCI device being suspended
 * @enable_wake: out-parameter; set true when the caller should arm
 *	PME wake-up (any WoL filter active, or manageability enabled)
 *
 * Detaches and stops the interface, saves PCI state (CONFIG_PM only),
 * programs the wake-up filter/control registers, releases
 * manageability, frees the IRQ and disables the PCI device.
 * Returns 0 on success or the pci_save_state() error.
 **/
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	/* with link up, wake-on-link-change would fire immediately */
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
5031
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005032#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005033static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5034{
5035 int retval;
5036 bool wake;
5037
5038 retval = __e1000_shutdown(pdev, &wake);
5039 if (retval)
5040 return retval;
5041
5042 if (wake) {
5043 pci_prepare_to_sleep(pdev);
5044 } else {
5045 pci_wake_from_d3(pdev, false);
5046 pci_set_power_state(pdev, PCI_D3hot);
5047 }
5048
5049 return 0;
5050}
5051
/**
 * e1000_resume - PM resume entry point
 * @pdev: PCI device being resumed
 *
 * Restores PCI state, re-enables the device (I/O or memory variant per
 * adapter->need_ioport), clears wake enables, reacquires the IRQ if the
 * interface was running, powers up the PHY, resets the adapter and
 * brings the interface back up.
 **/
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* NOTE(review): err holds int returns from PCI calls in a u32
	 * and is returned as int — works, but int would be cleaner */
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* restore_state marks the saved state stale; re-save for the
	 * next suspend cycle */
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	/* disarm wake-up now that we are back in D0 */
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	/* clear all Wake Up Status bits */
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
5095#endif
Auke Kokc653e632006-05-23 13:35:57 -07005096
5097static void e1000_shutdown(struct pci_dev *pdev)
5098{
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005099 bool wake;
5100
5101 __e1000_shutdown(pdev, &wake);
5102
5103 if (system_state == SYSTEM_POWER_OFF) {
5104 pci_wake_from_d3(pdev, wake);
5105 pci_set_power_state(pdev, PCI_D3hot);
5106 }
Auke Kokc653e632006-05-23 13:35:57 -07005107}
5108
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109#ifdef CONFIG_NET_POLL_CONTROLLER
5110/*
5111 * Polling 'interrupt' - used by things like netconsole to send skbs
5112 * without having to re-enable interrupts. It's not called while
5113 * the interrupt routine is executing.
5114 */
Joe Perches64798842008-07-11 15:17:02 -07005115static void e1000_netpoll(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005117 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kokd3d9e482006-07-14 16:14:23 -07005118
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119 disable_irq(adapter->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005120 e1000_intr(adapter->pdev->irq, netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 enable_irq(adapter->pdev->irq);
5122}
5123#endif
5124
Auke Kok90267292006-06-08 09:30:24 -07005125/**
5126 * e1000_io_error_detected - called when PCI error is detected
5127 * @pdev: Pointer to PCI device
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07005128 * @state: The current pci connection state
Auke Kok90267292006-06-08 09:30:24 -07005129 *
5130 * This function is called after a PCI bus error affecting
5131 * this device has been detected.
5132 */
Joe Perches64798842008-07-11 15:17:02 -07005133static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5134 pci_channel_state_t state)
Auke Kok90267292006-06-08 09:30:24 -07005135{
5136 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005137 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kok90267292006-06-08 09:30:24 -07005138
5139 netif_device_detach(netdev);
5140
Andre Detscheab63302009-06-30 12:46:13 +00005141 if (state == pci_channel_io_perm_failure)
5142 return PCI_ERS_RESULT_DISCONNECT;
5143
Auke Kok90267292006-06-08 09:30:24 -07005144 if (netif_running(netdev))
5145 e1000_down(adapter);
Linas Vepstas72e8d6b2006-09-18 20:58:06 -07005146 pci_disable_device(pdev);
Auke Kok90267292006-06-08 09:30:24 -07005147
5148 /* Request a slot slot reset. */
5149 return PCI_ERS_RESULT_NEED_RESET;
5150}
5151
5152/**
5153 * e1000_io_slot_reset - called after the pci bus has been reset.
5154 * @pdev: Pointer to PCI device
5155 *
5156 * Restart the card from scratch, as if from a cold-boot. Implementation
5157 * resembles the first-half of the e1000_resume routine.
5158 */
5159static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5160{
5161 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005162 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005163 struct e1000_hw *hw = &adapter->hw;
Taku Izumi81250292008-07-11 15:17:44 -07005164 int err;
Auke Kok90267292006-06-08 09:30:24 -07005165
Taku Izumi81250292008-07-11 15:17:44 -07005166 if (adapter->need_ioport)
5167 err = pci_enable_device(pdev);
5168 else
5169 err = pci_enable_device_mem(pdev);
5170 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005171 pr_err("Cannot re-enable PCI device after reset.\n");
Auke Kok90267292006-06-08 09:30:24 -07005172 return PCI_ERS_RESULT_DISCONNECT;
5173 }
5174 pci_set_master(pdev);
5175
Linas Vepstasdbf38c92006-09-27 12:54:11 -07005176 pci_enable_wake(pdev, PCI_D3hot, 0);
5177 pci_enable_wake(pdev, PCI_D3cold, 0);
Auke Kok90267292006-06-08 09:30:24 -07005178
Auke Kok90267292006-06-08 09:30:24 -07005179 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005180 ew32(WUS, ~0);
Auke Kok90267292006-06-08 09:30:24 -07005181
5182 return PCI_ERS_RESULT_RECOVERED;
5183}
5184
5185/**
5186 * e1000_io_resume - called when traffic can start flowing again.
5187 * @pdev: Pointer to PCI device
5188 *
5189 * This callback is called when the error recovery driver tells us that
5190 * its OK to resume normal operation. Implementation resembles the
5191 * second-half of the e1000_resume routine.
5192 */
5193static void e1000_io_resume(struct pci_dev *pdev)
5194{
5195 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005196 struct e1000_adapter *adapter = netdev_priv(netdev);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005197
5198 e1000_init_manageability(adapter);
Auke Kok90267292006-06-08 09:30:24 -07005199
5200 if (netif_running(netdev)) {
5201 if (e1000_up(adapter)) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005202 pr_info("can't bring device back up after reset\n");
Auke Kok90267292006-06-08 09:30:24 -07005203 return;
5204 }
5205 }
5206
5207 netif_device_attach(netdev);
Auke Kok90267292006-06-08 09:30:24 -07005208}
5209
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210/* e1000_main.c */