blob: 7483ca0a6282f8e2111c44ca67a0518c8edba661 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*******************************************************************************
2
Auke Kok0abb6eb2006-09-27 12:53:14 -07003 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 more details.
Auke Kok0abb6eb2006-09-27 12:53:14 -070014
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 You should have received a copy of the GNU General Public License along with
Auke Kok0abb6eb2006-09-27 12:53:14 -070016 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 Contact Information:
23 Linux NICS <linux.nics@intel.com>
Auke Kok3d41e302006-04-14 19:05:31 -070024 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
Andrew Mortond0bb53e2006-11-14 10:35:03 -050030#include <net/ip6_checksum.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000031#include <linux/io.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040032#include <linux/prefetch.h>
Jiri Pirko5622e402011-07-21 03:26:31 +000033#include <linux/bitops.h>
34#include <linux/if_vlan.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000035
Linus Torvalds1da177e2005-04-16 15:20:36 -070036char e1000_driver_name[] = "e1000";
Adrian Bunk3ad2cc62005-10-30 16:53:34 +010037static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
Anupam Chandaab088532010-11-21 09:54:21 -080038#define DRV_VERSION "7.3.21-k8-NAPI"
Stephen Hemmingerabec42a2007-10-29 10:46:19 -070039const char e1000_driver_version[] = DRV_VERSION;
40static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
42/* e1000_pci_tbl - PCI Device ID Table
43 *
44 * Last entry must be all 0s
45 *
46 * Macro expands to...
47 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48 */
Alexey Dobriyana3aa1882010-01-07 11:58:11 +000049static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070050 INTEL_E1000_ETHERNET_DEVICE(0x1000),
51 INTEL_E1000_ETHERNET_DEVICE(0x1001),
52 INTEL_E1000_ETHERNET_DEVICE(0x1004),
53 INTEL_E1000_ETHERNET_DEVICE(0x1008),
54 INTEL_E1000_ETHERNET_DEVICE(0x1009),
55 INTEL_E1000_ETHERNET_DEVICE(0x100C),
56 INTEL_E1000_ETHERNET_DEVICE(0x100D),
57 INTEL_E1000_ETHERNET_DEVICE(0x100E),
58 INTEL_E1000_ETHERNET_DEVICE(0x100F),
59 INTEL_E1000_ETHERNET_DEVICE(0x1010),
60 INTEL_E1000_ETHERNET_DEVICE(0x1011),
61 INTEL_E1000_ETHERNET_DEVICE(0x1012),
62 INTEL_E1000_ETHERNET_DEVICE(0x1013),
63 INTEL_E1000_ETHERNET_DEVICE(0x1014),
64 INTEL_E1000_ETHERNET_DEVICE(0x1015),
65 INTEL_E1000_ETHERNET_DEVICE(0x1016),
66 INTEL_E1000_ETHERNET_DEVICE(0x1017),
67 INTEL_E1000_ETHERNET_DEVICE(0x1018),
68 INTEL_E1000_ETHERNET_DEVICE(0x1019),
Malli Chilakala26483452005-04-28 19:44:46 -070069 INTEL_E1000_ETHERNET_DEVICE(0x101A),
Linus Torvalds1da177e2005-04-16 15:20:36 -070070 INTEL_E1000_ETHERNET_DEVICE(0x101D),
71 INTEL_E1000_ETHERNET_DEVICE(0x101E),
72 INTEL_E1000_ETHERNET_DEVICE(0x1026),
73 INTEL_E1000_ETHERNET_DEVICE(0x1027),
74 INTEL_E1000_ETHERNET_DEVICE(0x1028),
75 INTEL_E1000_ETHERNET_DEVICE(0x1075),
76 INTEL_E1000_ETHERNET_DEVICE(0x1076),
77 INTEL_E1000_ETHERNET_DEVICE(0x1077),
78 INTEL_E1000_ETHERNET_DEVICE(0x1078),
79 INTEL_E1000_ETHERNET_DEVICE(0x1079),
80 INTEL_E1000_ETHERNET_DEVICE(0x107A),
81 INTEL_E1000_ETHERNET_DEVICE(0x107B),
82 INTEL_E1000_ETHERNET_DEVICE(0x107C),
83 INTEL_E1000_ETHERNET_DEVICE(0x108A),
Jeff Kirsherb7ee49d2006-01-12 16:51:21 -080084 INTEL_E1000_ETHERNET_DEVICE(0x1099),
Jeff Kirsherb7ee49d2006-01-12 16:51:21 -080085 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
Dirk Brandewie5377a412011-01-06 14:29:54 +000086 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
Linus Torvalds1da177e2005-04-16 15:20:36 -070087 /* required last entry */
88 {0,}
89};
90
91MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
Nicholas Nunley35574762006-09-27 12:53:34 -070093int e1000_up(struct e1000_adapter *adapter);
94void e1000_down(struct e1000_adapter *adapter);
95void e1000_reinit_locked(struct e1000_adapter *adapter);
96void e1000_reset(struct e1000_adapter *adapter);
Nicholas Nunley35574762006-09-27 12:53:34 -070097int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700102 struct e1000_tx_ring *txdr);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700104 struct e1000_rx_ring *rxdr);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700106 struct e1000_tx_ring *tx_ring);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
Nicholas Nunley35574762006-09-27 12:53:34 -0700108 struct e1000_rx_ring *rx_ring);
109void e1000_update_stats(struct e1000_adapter *adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110
111static int e1000_init_module(void);
112static void e1000_exit_module(void);
113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114static void __devexit e1000_remove(struct pci_dev *pdev);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400115static int e1000_alloc_queues(struct e1000_adapter *adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116static int e1000_sw_init(struct e1000_adapter *adapter);
117static int e1000_open(struct net_device *netdev);
118static int e1000_close(struct net_device *netdev);
119static void e1000_configure_tx(struct e1000_adapter *adapter);
120static void e1000_configure_rx(struct e1000_adapter *adapter);
121static void e1000_setup_rctl(struct e1000_adapter *adapter);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125 struct e1000_tx_ring *tx_ring);
126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127 struct e1000_rx_ring *rx_ring);
Patrick McHardydb0ce502007-11-13 20:54:59 -0800128static void e1000_set_rx_mode(struct net_device *netdev);
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +0000129static void e1000_update_phy_info_task(struct work_struct *work);
Jesse Brandeburga4010af2011-10-05 07:24:41 +0000130static void e1000_watchdog(struct work_struct *work);
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +0000131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
Stephen Hemminger3b29a562009-08-31 19:50:55 +0000132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 struct net_device *netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136static int e1000_set_mac(struct net_device *netdev, void *p);
David Howells7d12e782006-10-05 14:55:46 +0100137static irqreturn_t e1000_intr(int irq, void *data);
Joe Perchesc3033b02008-03-21 11:06:25 -0700138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 struct e1000_tx_ring *tx_ring);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700140static int e1000_clean(struct napi_struct *napi, int budget);
Joe Perchesc3033b02008-03-21 11:06:25 -0700141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 struct e1000_rx_ring *rx_ring,
143 int *work_done, int work_to_do);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 struct e1000_rx_ring *rx_ring,
146 int *work_done, int work_to_do);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400147static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000148 struct e1000_rx_ring *rx_ring,
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800149 int cleaned_count);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000150static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151 struct e1000_rx_ring *rx_ring,
152 int cleaned_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155 int cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158static void e1000_tx_timeout(struct net_device *dev);
David Howells65f27f32006-11-22 14:55:48 +0000159static void e1000_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160static void e1000_smartspeed(struct e1000_adapter *adapter);
Auke Koke619d522006-04-14 19:04:52 -0700161static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162 struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
Jiri Pirko5622e402011-07-21 03:26:31 +0000164static bool e1000_vlan_used(struct e1000_adapter *adapter);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000165static void e1000_vlan_mode(struct net_device *netdev,
166 netdev_features_t features);
Jiri Pirko52f55092012-03-20 18:10:01 +0000167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168 bool filter_on);
Jiri Pirko8e586132011-12-08 19:52:37 -0500169static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
170static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171static void e1000_restore_vlan(struct e1000_adapter *adapter);
172
Auke Kok6fdfef12006-06-27 09:06:36 -0700173#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +0000174static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175static int e1000_resume(struct pci_dev *pdev);
176#endif
Auke Kokc653e632006-05-23 13:35:57 -0700177static void e1000_shutdown(struct pci_dev *pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178
179#ifdef CONFIG_NET_POLL_CONTROLLER
180/* for netdump / net console */
181static void e1000_netpoll (struct net_device *netdev);
182#endif
183
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100184#define COPYBREAK_DEFAULT 256
185static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
186module_param(copybreak, uint, 0644);
187MODULE_PARM_DESC(copybreak,
188 "Maximum size of packet that is copied to a new buffer on receive");
189
Auke Kok90267292006-06-08 09:30:24 -0700190static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
191 pci_channel_state_t state);
192static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
193static void e1000_io_resume(struct pci_dev *pdev);
194
195static struct pci_error_handlers e1000_err_handler = {
196 .error_detected = e1000_io_error_detected,
197 .slot_reset = e1000_io_slot_reset,
198 .resume = e1000_io_resume,
199};
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -0400200
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201static struct pci_driver e1000_driver = {
202 .name = e1000_driver_name,
203 .id_table = e1000_pci_tbl,
204 .probe = e1000_probe,
205 .remove = __devexit_p(e1000_remove),
Auke Kokc4e24f02006-09-27 12:53:19 -0700206#ifdef CONFIG_PM
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300207 /* Power Management Hooks */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 .suspend = e1000_suspend,
Auke Kokc653e632006-05-23 13:35:57 -0700209 .resume = e1000_resume,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210#endif
Auke Kok90267292006-06-08 09:30:24 -0700211 .shutdown = e1000_shutdown,
212 .err_handler = &e1000_err_handler
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213};
214
215MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
216MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
217MODULE_LICENSE("GPL");
218MODULE_VERSION(DRV_VERSION);
219
stephen hemmingerb3f4d592012-03-13 06:04:20 +0000220#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221static int debug = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222module_param(debug, int, 0);
223MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
224
/**
 * e1000_get_hw_dev - return the net_device associated with an e1000_hw
 * @hw: pointer to the shared hardware structure
 *
 * Used by the hardware layer to resolve the owning net_device when
 * printing debugging information; hw->back points at the adapter
 * private structure.
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}
235
236/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 * e1000_init_module - Driver Registration Routine
238 *
239 * e1000_init_module is the first routine called when the driver is
240 * loaded. All it does is register with the PCI subsystem.
241 **/
242
Joe Perches64798842008-07-11 15:17:02 -0700243static int __init e1000_init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244{
245 int ret;
Emil Tantilov675ad472010-04-27 14:02:58 +0000246 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247
Emil Tantilov675ad472010-04-27 14:02:58 +0000248 pr_info("%s\n", e1000_copyright);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249
Jeff Garzik29917622006-08-19 17:48:59 -0400250 ret = pci_register_driver(&e1000_driver);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100251 if (copybreak != COPYBREAK_DEFAULT) {
252 if (copybreak == 0)
Emil Tantilov675ad472010-04-27 14:02:58 +0000253 pr_info("copybreak disabled\n");
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100254 else
Emil Tantilov675ad472010-04-27 14:02:58 +0000255 pr_info("copybreak enabled for "
256 "packets <= %u bytes\n", copybreak);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 return ret;
259}
260
261module_init(e1000_init_module);
262
263/**
264 * e1000_exit_module - Driver Exit Cleanup Routine
265 *
266 * e1000_exit_module is called just before the driver is removed
267 * from memory.
268 **/
269
Joe Perches64798842008-07-11 15:17:02 -0700270static void __exit e1000_exit_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 pci_unregister_driver(&e1000_driver);
273}
274
275module_exit(e1000_exit_module);
276
Auke Kok2db10a02006-06-27 09:06:28 -0700277static int e1000_request_irq(struct e1000_adapter *adapter)
278{
279 struct net_device *netdev = adapter->netdev;
Al Viro3e188262007-12-11 19:49:39 +0000280 irq_handler_t handler = e1000_intr;
Auke Koke94bd232007-05-16 01:49:46 -0700281 int irq_flags = IRQF_SHARED;
282 int err;
Auke Kok2db10a02006-06-27 09:06:28 -0700283
Auke Koke94bd232007-05-16 01:49:46 -0700284 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285 netdev);
286 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700287 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
Auke Koke94bd232007-05-16 01:49:46 -0700288 }
Auke Kok2db10a02006-06-27 09:06:28 -0700289
290 return err;
291}
292
293static void e1000_free_irq(struct e1000_adapter *adapter)
294{
295 struct net_device *netdev = adapter->netdev;
296
297 free_irq(adapter->pdev->irq, netdev);
Auke Kok2db10a02006-06-27 09:06:28 -0700298}
299
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask every interrupt cause, then flush the posted write so the
	 * hardware has actually seen the mask */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	/* wait for any handler already running on another CPU to finish */
	synchronize_irq(adapter->pdev->irq);
}
313
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* unmask the default cause set and flush the posted write */
	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100326
/* Keep the manageability (firmware/BMC) VLAN id in sync with the VLAN
 * filter table: if the management cookie names a VLAN not yet in the
 * active set, add it, and retire a previously tracked id that nothing
 * references any more. No-op when VLAN filtering is unused. */
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			/* firmware supports VLAN - track and install it */
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* drop the old management vid if nobody else uses it */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
Jeff Kirsherb55ccb32006-01-12 16:50:30 -0800353
Joe Perches64798842008-07-11 15:17:02 -0700354static void e1000_init_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500355{
Joe Perches1dc32912008-07-11 15:17:08 -0700356 struct e1000_hw *hw = &adapter->hw;
357
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500358 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700359 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500360
361 /* disable hardware interception of ARP */
362 manc &= ~(E1000_MANC_ARP_EN);
363
Joe Perches1dc32912008-07-11 15:17:08 -0700364 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500365 }
366}
367
Joe Perches64798842008-07-11 15:17:02 -0700368static void e1000_release_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500369{
Joe Perches1dc32912008-07-11 15:17:08 -0700370 struct e1000_hw *hw = &adapter->hw;
371
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500372 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700373 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500374
375 /* re-enable hardware interception of ARP */
376 manc |= E1000_MANC_ARP_EN;
377
Joe Perches1dc32912008-07-11 15:17:08 -0700378 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500379 }
380}
381
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 *
 * Reprograms filtering, VLANs, manageability and the Tx/Rx units after
 * a reset, then refills every receive ring. Order matters: RCTL must be
 * set up before the Rx unit is configured and buffers are posted.
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
Jeff Kirsher7bfa4812006-01-12 16:50:41 -0800408
/**
 * e1000_up - bring the interface back up after a reset
 * @adapter: board private structure
 *
 * Reconfigures the hardware, re-enables NAPI/interrupts/the Tx queue,
 * and kicks a link-change interrupt to start the watchdog.
 * Always returns 0.
 **/
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
428
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}
453
454static void e1000_power_down_phy(struct e1000_adapter *adapter)
455{
Joe Perches1dc32912008-07-11 15:17:08 -0700456 struct e1000_hw *hw = &adapter->hw;
457
Bruce Allan61c25052006-09-27 12:53:54 -0700458 /* Power down the PHY so no link is implied when interface is down *
Joe Perchesc3033b02008-03-21 11:06:25 -0700459 * The PHY cannot be powered down if any of the following is true *
Auke Kok79f05bf2006-06-27 09:06:32 -0700460 * (a) WoL is enabled
461 * (b) AMT is active
462 * (c) SoL/IDER session is active */
Joe Perches1dc32912008-07-11 15:17:08 -0700463 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464 hw->media_type == e1000_media_type_copper) {
Joe Perches406874a2008-04-03 10:06:32 -0700465 u16 mii_reg = 0;
Bruce Allan61c25052006-09-27 12:53:54 -0700466
Joe Perches1dc32912008-07-11 15:17:08 -0700467 switch (hw->mac_type) {
Bruce Allan61c25052006-09-27 12:53:54 -0700468 case e1000_82540:
469 case e1000_82545:
470 case e1000_82545_rev_3:
471 case e1000_82546:
Dirk Brandewie5377a412011-01-06 14:29:54 +0000472 case e1000_ce4100:
Bruce Allan61c25052006-09-27 12:53:54 -0700473 case e1000_82546_rev_3:
474 case e1000_82541:
475 case e1000_82541_rev_2:
476 case e1000_82547:
477 case e1000_82547_rev_2:
Joe Perches1dc32912008-07-11 15:17:08 -0700478 if (er32(MANC) & E1000_MANC_SMBUS_EN)
Bruce Allan61c25052006-09-27 12:53:54 -0700479 goto out;
480 break;
Bruce Allan61c25052006-09-27 12:53:54 -0700481 default:
482 goto out;
483 }
Joe Perches1dc32912008-07-11 15:17:08 -0700484 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700485 mii_reg |= MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700486 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Jesse Brandeburg4e0d8f7d2011-10-05 07:24:46 +0000487 msleep(1);
Auke Kok79f05bf2006-06-27 09:06:32 -0700488 }
Bruce Allan61c25052006-09-27 12:53:54 -0700489out:
490 return;
Auke Kok79f05bf2006-06-27 09:06:32 -0700491}
492
/* Mark the adapter DOWN and synchronously stop all deferred work.
 * Must be called after interrupts are disabled (see e1000_down()). */
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
505
/**
 * e1000_down - quiesce the device and take the interface down
 * @adapter: board private structure
 *
 * Disables Rx/Tx in hardware, drains NAPI and interrupts, stops all
 * deferred work, then resets the chip and frees every buffered frame.
 * The teardown order below is deliberate; do not reorder.
 **/
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;


	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547
/* Down/up the interface from contexts that do NOT hold the RTNL lock,
 * serialized against concurrent resets via __E1000_RESETTING and
 * against other driver paths via the adapter mutex. */
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
558
/* Down/up the interface for callers that already hold the RTNL lock;
 * serialized against other resets via the __E1000_RESETTING bit. */
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
570
/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer allocation (PBA) between the Tx and
 * Rx FIFOs for the current MTU, recomputes the flow-control high/low
 * watermarks, then resets and re-initializes the MAC.  Called from
 * probe and whenever settings that require a full reset change.
 */
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	/* Pick the default PBA and whether this MAC uses the legacy
	 * (fixed-offset) adjustment scheme or the min-space calculation. */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			/* 82547 uses a software Tx FIFO-stall workaround;
			 * seed its bookkeeping from the chosen PBA split. */
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;	/* bytes -> KB */
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;	/* bytes -> KB */

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);	/* clear wake-up control after reset */

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
723
724/**
Auke Kok67b3c272007-12-17 13:50:23 -0800725 * Dump the eeprom for users having checksum issues
726 **/
Adrian Bunkb4ea8952008-02-01 08:21:28 -0800727static void e1000_dump_eeprom(struct e1000_adapter *adapter)
Auke Kok67b3c272007-12-17 13:50:23 -0800728{
729 struct net_device *netdev = adapter->netdev;
730 struct ethtool_eeprom eeprom;
731 const struct ethtool_ops *ops = netdev->ethtool_ops;
732 u8 *data;
733 int i;
734 u16 csum_old, csum_new = 0;
735
736 eeprom.len = ops->get_eeprom_len(netdev);
737 eeprom.offset = 0;
738
739 data = kmalloc(eeprom.len, GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +0000740 if (!data)
Auke Kok67b3c272007-12-17 13:50:23 -0800741 return;
Auke Kok67b3c272007-12-17 13:50:23 -0800742
743 ops->get_eeprom(netdev, &eeprom, data);
744
745 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
746 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
747 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
748 csum_new += data[i] + (data[i + 1] << 8);
749 csum_new = EEPROM_SUM - csum_new;
750
Emil Tantilov675ad472010-04-27 14:02:58 +0000751 pr_err("/*********************/\n");
752 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
753 pr_err("Calculated : 0x%04x\n", csum_new);
Auke Kok67b3c272007-12-17 13:50:23 -0800754
Emil Tantilov675ad472010-04-27 14:02:58 +0000755 pr_err("Offset Values\n");
756 pr_err("======== ======\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800757 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
758
Emil Tantilov675ad472010-04-27 14:02:58 +0000759 pr_err("Include this output when contacting your support provider.\n");
760 pr_err("This is not a software error! Something bad happened to\n");
761 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
762 pr_err("result in further problems, possibly loss of data,\n");
763 pr_err("corruption or system hangs!\n");
764 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
765 pr_err("which is invalid and requires you to set the proper MAC\n");
766 pr_err("address manually before continuing to enable this network\n");
767 pr_err("device. Please inspect the EEPROM dump and report the\n");
768 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
769 pr_err("/*********************/\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800770
771 kfree(data);
772}
773
774/**
Taku Izumi81250292008-07-11 15:17:44 -0700775 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
776 * @pdev: PCI device information struct
777 *
778 * Return true if an adapter needs ioport resources
779 **/
780static int e1000_is_need_ioport(struct pci_dev *pdev)
781{
782 switch (pdev->device) {
783 case E1000_DEV_ID_82540EM:
784 case E1000_DEV_ID_82540EM_LOM:
785 case E1000_DEV_ID_82540EP:
786 case E1000_DEV_ID_82540EP_LOM:
787 case E1000_DEV_ID_82540EP_LP:
788 case E1000_DEV_ID_82541EI:
789 case E1000_DEV_ID_82541EI_MOBILE:
790 case E1000_DEV_ID_82541ER:
791 case E1000_DEV_ID_82541ER_LOM:
792 case E1000_DEV_ID_82541GI:
793 case E1000_DEV_ID_82541GI_LF:
794 case E1000_DEV_ID_82541GI_MOBILE:
795 case E1000_DEV_ID_82544EI_COPPER:
796 case E1000_DEV_ID_82544EI_FIBER:
797 case E1000_DEV_ID_82544GC_COPPER:
798 case E1000_DEV_ID_82544GC_LOM:
799 case E1000_DEV_ID_82545EM_COPPER:
800 case E1000_DEV_ID_82545EM_FIBER:
801 case E1000_DEV_ID_82546EB_COPPER:
802 case E1000_DEV_ID_82546EB_FIBER:
803 case E1000_DEV_ID_82546EB_QUAD_COPPER:
804 return true;
805 default:
806 return false;
807 }
808}
809
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000810static netdev_features_t e1000_fix_features(struct net_device *netdev,
811 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +0000812{
813 /*
814 * Since there is no support for separate rx/tx vlan accel
815 * enable/disable make sure tx flag is always in same state as rx.
816 */
817 if (features & NETIF_F_HW_VLAN_RX)
818 features |= NETIF_F_HW_VLAN_TX;
819 else
820 features &= ~NETIF_F_HW_VLAN_TX;
821
822 return features;
823}
824
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000825static int e1000_set_features(struct net_device *netdev,
826 netdev_features_t features)
Michał Mirosławe97d3202011-06-08 08:36:42 +0000827{
828 struct e1000_adapter *adapter = netdev_priv(netdev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000829 netdev_features_t changed = features ^ netdev->features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000830
Jiri Pirko5622e402011-07-21 03:26:31 +0000831 if (changed & NETIF_F_HW_VLAN_RX)
832 e1000_vlan_mode(netdev, features);
833
Ben Greeare825b732012-04-04 06:01:29 +0000834 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
Michał Mirosławe97d3202011-06-08 08:36:42 +0000835 return 0;
836
Ben Greeare825b732012-04-04 06:01:29 +0000837 netdev->features = features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000838 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
839
840 if (netif_running(netdev))
841 e1000_reinit_locked(adapter);
842 else
843 e1000_reset(adapter);
844
845 return 0;
846}
847
/* Net device callbacks for e1000; installed on the netdev in e1000_probe().
 * Feature negotiation goes through e1000_fix_features/e1000_set_features. */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
867
Taku Izumi81250292008-07-11 15:17:44 -0700868/**
Jesse Brandeburge508be12010-09-07 21:01:12 +0000869 * e1000_init_hw_struct - initialize members of hw struct
870 * @adapter: board private struct
871 * @hw: structure used by e1000_hw.c
872 *
873 * Factors out initialization of the e1000_hw struct to its own function
874 * that can be called very early at init (just after struct allocation).
875 * Fields are initialized based on PCI device information and
876 * OS network device settings (MTU size).
877 * Returns negative error codes if MAC type setup fails.
878 */
879static int e1000_init_hw_struct(struct e1000_adapter *adapter,
880 struct e1000_hw *hw)
881{
882 struct pci_dev *pdev = adapter->pdev;
883
884 /* PCI config space info */
885 hw->vendor_id = pdev->vendor;
886 hw->device_id = pdev->device;
887 hw->subsystem_vendor_id = pdev->subsystem_vendor;
888 hw->subsystem_id = pdev->subsystem_device;
889 hw->revision_id = pdev->revision;
890
891 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
892
893 hw->max_frame_size = adapter->netdev->mtu +
894 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
895 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
896
897 /* identify the MAC */
898 if (e1000_set_mac_type(hw)) {
899 e_err(probe, "Unknown MAC Type\n");
900 return -EIO;
901 }
902
903 switch (hw->mac_type) {
904 default:
905 break;
906 case e1000_82541:
907 case e1000_82547:
908 case e1000_82541_rev_2:
909 case e1000_82547_rev_2:
910 hw->phy_init_script = 1;
911 break;
912 }
913
914 e1000_set_media_type(hw);
915 e1000_get_bus_info(hw);
916
917 hw->wait_autoneg_complete = false;
918 hw->tbi_compatibility_en = true;
919 hw->adaptive_ifs = true;
920
921 /* Copper options */
922
923 if (hw->media_type == e1000_media_type_copper) {
924 hw->mdix = AUTO_ALL_MODES;
925 hw->disable_polarity_correction = false;
926 hw->master_slave = E1000_MASTER_SLAVE;
927 }
928
929 return 0;
930}
931
932/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933 * e1000_probe - Device Initialization Routine
934 * @pdev: PCI device information struct
935 * @ent: entry in e1000_pci_tbl
936 *
937 * Returns 0 on success, negative on failure
938 *
939 * e1000_probe initializes an adapter identified by a pci_dev structure.
940 * The OS initialization, configuring of the adapter private structure,
941 * and a hardware reset occur.
942 **/
Joe Perches1dc32912008-07-11 15:17:08 -0700943static int __devinit e1000_probe(struct pci_dev *pdev,
944 const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945{
946 struct net_device *netdev;
947 struct e1000_adapter *adapter;
Joe Perches1dc32912008-07-11 15:17:08 -0700948 struct e1000_hw *hw;
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700949
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 static int cards_found = 0;
Jesse Brandeburg120cd572006-08-31 14:27:46 -0700951 static int global_quad_port_a = 0; /* global ksp3 port a indication */
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700952 int i, err, pci_using_dac;
Joe Perches406874a2008-04-03 10:06:32 -0700953 u16 eeprom_data = 0;
Dirk Brandewie5377a412011-01-06 14:29:54 +0000954 u16 tmp = 0;
Joe Perches406874a2008-04-03 10:06:32 -0700955 u16 eeprom_apme_mask = E1000_EEPROM_APME;
Taku Izumi81250292008-07-11 15:17:44 -0700956 int bars, need_ioport;
Joe Perches0795af52007-10-03 17:59:30 -0700957
Taku Izumi81250292008-07-11 15:17:44 -0700958 /* do not allocate ioport bars when not needed */
959 need_ioport = e1000_is_need_ioport(pdev);
960 if (need_ioport) {
961 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
962 err = pci_enable_device(pdev);
963 } else {
964 bars = pci_select_bars(pdev, IORESOURCE_MEM);
Karsten Keil4d7155b2009-02-03 15:18:01 -0800965 err = pci_enable_device_mem(pdev);
Taku Izumi81250292008-07-11 15:17:44 -0700966 }
Joe Perchesc7be73b2008-07-11 15:17:28 -0700967 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968 return err;
969
Taku Izumi81250292008-07-11 15:17:44 -0700970 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
Joe Perchesc7be73b2008-07-11 15:17:28 -0700971 if (err)
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700972 goto err_pci_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973
974 pci_set_master(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +0000975 err = pci_save_state(pdev);
976 if (err)
977 goto err_alloc_etherdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700979 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700981 if (!netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 goto err_alloc_etherdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 SET_NETDEV_DEV(netdev, &pdev->dev);
985
986 pci_set_drvdata(pdev, netdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -0700987 adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 adapter->netdev = netdev;
989 adapter->pdev = pdev;
stephen hemmingerb3f4d592012-03-13 06:04:20 +0000990 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Taku Izumi81250292008-07-11 15:17:44 -0700991 adapter->bars = bars;
992 adapter->need_ioport = need_ioport;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993
Joe Perches1dc32912008-07-11 15:17:08 -0700994 hw = &adapter->hw;
995 hw->back = adapter;
996
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700997 err = -EIO;
Arjan van de Ven275f1652008-10-20 21:42:39 -0700998 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
Joe Perches1dc32912008-07-11 15:17:08 -0700999 if (!hw->hw_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000 goto err_ioremap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001
Taku Izumi81250292008-07-11 15:17:44 -07001002 if (adapter->need_ioport) {
1003 for (i = BAR_1; i <= BAR_5; i++) {
1004 if (pci_resource_len(pdev, i) == 0)
1005 continue;
1006 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1007 hw->io_base = pci_resource_start(pdev, i);
1008 break;
1009 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010 }
1011 }
1012
Jesse Brandeburge508be12010-09-07 21:01:12 +00001013 /* make ready for any if (hw->...) below */
1014 err = e1000_init_hw_struct(adapter, hw);
1015 if (err)
1016 goto err_sw_init;
1017
1018 /*
1019 * there is a workaround being applied below that limits
1020 * 64-bit DMA addresses to 64-bit hardware. There are some
1021 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1022 */
1023 pci_using_dac = 0;
1024 if ((hw->bus_type == e1000_bus_type_pcix) &&
1025 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1026 /*
1027 * according to DMA-API-HOWTO, coherent calls will always
1028 * succeed if the set call did
1029 */
1030 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1031 pci_using_dac = 1;
Jesse Brandeburge508be12010-09-07 21:01:12 +00001032 } else {
Dean Nelson19a0b672010-11-11 05:50:25 +00001033 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1034 if (err) {
1035 pr_err("No usable DMA config, aborting\n");
1036 goto err_dma;
1037 }
1038 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Jesse Brandeburge508be12010-09-07 21:01:12 +00001039 }
1040
Stephen Hemminger0e7614b2008-11-19 22:18:22 -08001041 netdev->netdev_ops = &e1000_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042 e1000_set_ethtool_ops(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043 netdev->watchdog_timeo = 5 * HZ;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001044 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
Stephen Hemminger0e7614b2008-11-19 22:18:22 -08001045
Auke Kok0eb5a342006-09-27 12:53:17 -07001046 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048 adapter->bd_number = cards_found;
1049
1050 /* setup the private structure */
1051
Joe Perchesc7be73b2008-07-11 15:17:28 -07001052 err = e1000_sw_init(adapter);
1053 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 goto err_sw_init;
1055
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001056 err = -EIO;
Dirk Brandewie5377a412011-01-06 14:29:54 +00001057 if (hw->mac_type == e1000_ce4100) {
Florian Fainelli13acde82012-01-04 20:23:35 +00001058 hw->ce4100_gbe_mdio_base_virt =
1059 ioremap(pci_resource_start(pdev, BAR_1),
Dirk Brandewie5377a412011-01-06 14:29:54 +00001060 pci_resource_len(pdev, BAR_1));
1061
Florian Fainelli13acde82012-01-04 20:23:35 +00001062 if (!hw->ce4100_gbe_mdio_base_virt)
Dirk Brandewie5377a412011-01-06 14:29:54 +00001063 goto err_mdio_ioremap;
1064 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001065
Joe Perches1dc32912008-07-11 15:17:08 -07001066 if (hw->mac_type >= e1000_82543) {
Michał Mirosławe97d3202011-06-08 08:36:42 +00001067 netdev->hw_features = NETIF_F_SG |
Jiri Pirko5622e402011-07-21 03:26:31 +00001068 NETIF_F_HW_CSUM |
1069 NETIF_F_HW_VLAN_RX;
Michał Mirosławe97d3202011-06-08 08:36:42 +00001070 netdev->features = NETIF_F_HW_VLAN_TX |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 NETIF_F_HW_VLAN_FILTER;
1072 }
1073
Joe Perches1dc32912008-07-11 15:17:08 -07001074 if ((hw->mac_type >= e1000_82544) &&
1075 (hw->mac_type != e1000_82547))
Michał Mirosławe97d3202011-06-08 08:36:42 +00001076 netdev->hw_features |= NETIF_F_TSO;
1077
Ben Greear11a78dc2012-02-11 15:40:01 +00001078 netdev->priv_flags |= IFF_SUPP_NOFCS;
1079
Michał Mirosławe97d3202011-06-08 08:36:42 +00001080 netdev->features |= netdev->hw_features;
1081 netdev->hw_features |= NETIF_F_RXCSUM;
Ben Greeare825b732012-04-04 06:01:29 +00001082 netdev->hw_features |= NETIF_F_RXALL;
Ben Greearb0d15622012-02-11 15:40:11 +00001083 netdev->hw_features |= NETIF_F_RXFCS;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001084
Yi Zou7b872a52010-09-22 17:57:58 +00001085 if (pci_using_dac) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001087 netdev->vlan_features |= NETIF_F_HIGHDMA;
1088 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089
Patrick McHardy20501a62008-10-11 12:25:59 -07001090 netdev->vlan_features |= NETIF_F_TSO;
Patrick McHardy20501a62008-10-11 12:25:59 -07001091 netdev->vlan_features |= NETIF_F_HW_CSUM;
1092 netdev->vlan_features |= NETIF_F_SG;
1093
Jiri Pirko01789342011-08-16 06:29:00 +00001094 netdev->priv_flags |= IFF_UNICAST_FLT;
1095
Joe Perches1dc32912008-07-11 15:17:08 -07001096 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001097
Auke Kokcd94dd02006-06-27 09:08:22 -07001098 /* initialize eeprom parameters */
Joe Perches1dc32912008-07-11 15:17:08 -07001099 if (e1000_init_eeprom_params(hw)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001100 e_err(probe, "EEPROM initialization failed\n");
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001101 goto err_eeprom;
Auke Kokcd94dd02006-06-27 09:08:22 -07001102 }
1103
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001104 /* before reading the EEPROM, reset the controller to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 * put the device in a known good starting state */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001106
Joe Perches1dc32912008-07-11 15:17:08 -07001107 e1000_reset_hw(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108
1109 /* make sure the EEPROM is good */
Joe Perches1dc32912008-07-11 15:17:08 -07001110 if (e1000_validate_eeprom_checksum(hw) < 0) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001111 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
Auke Kok67b3c272007-12-17 13:50:23 -08001112 e1000_dump_eeprom(adapter);
1113 /*
1114 * set MAC address to all zeroes to invalidate and temporary
1115 * disable this device for the user. This blocks regular
1116 * traffic while still permitting ethtool ioctls from reaching
1117 * the hardware as well as allowing the user to run the
1118 * interface after manually setting a hw addr using
1119 * `ip set address`
1120 */
Joe Perches1dc32912008-07-11 15:17:08 -07001121 memset(hw->mac_addr, 0, netdev->addr_len);
Auke Kok67b3c272007-12-17 13:50:23 -08001122 } else {
1123 /* copy the MAC address out of the EEPROM */
Joe Perches1dc32912008-07-11 15:17:08 -07001124 if (e1000_read_mac_addr(hw))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001125 e_err(probe, "EEPROM Read Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 }
Auke Kok67b3c272007-12-17 13:50:23 -08001127 /* don't block initalization here due to bad MAC address */
Joe Perches1dc32912008-07-11 15:17:08 -07001128 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1129 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130
Auke Kok67b3c272007-12-17 13:50:23 -08001131 if (!is_valid_ether_addr(netdev->perm_addr))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001132 e_err(probe, "Invalid MAC Address\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001135 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1136 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1137 e1000_82547_tx_fifo_stall_task);
1138 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
David Howells65f27f32006-11-22 14:55:48 +00001139 INIT_WORK(&adapter->reset_task, e1000_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 e1000_check_options(adapter);
1142
1143 /* Initial Wake on LAN setting
1144 * If APM wake is enabled in the EEPROM,
1145 * enable the ACPI Magic Packet filter
1146 */
1147
Joe Perches1dc32912008-07-11 15:17:08 -07001148 switch (hw->mac_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 case e1000_82542_rev2_0:
1150 case e1000_82542_rev2_1:
1151 case e1000_82543:
1152 break;
1153 case e1000_82544:
Joe Perches1dc32912008-07-11 15:17:08 -07001154 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1156 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1157 break;
1158 case e1000_82546:
1159 case e1000_82546_rev_3:
Joe Perches1dc32912008-07-11 15:17:08 -07001160 if (er32(STATUS) & E1000_STATUS_FUNC_1){
1161 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1163 break;
1164 }
1165 /* Fall Through */
1166 default:
Joe Perches1dc32912008-07-11 15:17:08 -07001167 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1169 break;
1170 }
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001171 if (eeprom_data & eeprom_apme_mask)
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001172 adapter->eeprom_wol |= E1000_WUFC_MAG;
1173
1174 /* now that we have the eeprom settings, apply the special cases
1175 * where the eeprom may be wrong or the board simply won't support
1176 * wake on lan on a particular port */
1177 switch (pdev->device) {
1178 case E1000_DEV_ID_82546GB_PCIE:
1179 adapter->eeprom_wol = 0;
1180 break;
1181 case E1000_DEV_ID_82546EB_FIBER:
1182 case E1000_DEV_ID_82546GB_FIBER:
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001183 /* Wake events only supported on port A for dual fiber
1184 * regardless of eeprom setting */
Joe Perches1dc32912008-07-11 15:17:08 -07001185 if (er32(STATUS) & E1000_STATUS_FUNC_1)
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001186 adapter->eeprom_wol = 0;
1187 break;
1188 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1189 /* if quad port adapter, disable WoL on all but port A */
1190 if (global_quad_port_a != 0)
1191 adapter->eeprom_wol = 0;
1192 else
Rusty Russell3db1cd52011-12-19 13:56:45 +00001193 adapter->quad_port_a = true;
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001194 /* Reset for multiple quad port adapters */
1195 if (++global_quad_port_a == 4)
1196 global_quad_port_a = 0;
1197 break;
1198 }
1199
1200 /* initialize the wol settings based on the eeprom settings */
1201 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\de126482008-11-07 20:30:19 +00001202 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203
Dirk Brandewie5377a412011-01-06 14:29:54 +00001204 /* Auto detect PHY address */
1205 if (hw->mac_type == e1000_ce4100) {
1206 for (i = 0; i < 32; i++) {
1207 hw->phy_addr = i;
1208 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1209 if (tmp == 0 || tmp == 0xFF) {
1210 if (i == 31)
1211 goto err_eeprom;
1212 continue;
1213 } else
1214 break;
1215 }
1216 }
1217
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 /* reset the hardware with the new settings */
1219 e1000_reset(adapter);
1220
Auke Kok416b5d12007-06-01 10:22:39 -07001221 strcpy(netdev->name, "eth%d");
Joe Perchesc7be73b2008-07-11 15:17:28 -07001222 err = register_netdev(netdev);
1223 if (err)
Auke Kok416b5d12007-06-01 10:22:39 -07001224 goto err_register;
Auke Kok1314bbf2006-09-27 12:54:02 -07001225
Jiri Pirko52f55092012-03-20 18:10:01 +00001226 e1000_vlan_filter_on_off(adapter, false);
Jiri Pirko5622e402011-07-21 03:26:31 +00001227
Emil Tantilov675ad472010-04-27 14:02:58 +00001228 /* print bus type/speed/width info */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001229 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
Joe Perches7837e582010-06-11 12:51:49 +00001230 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1231 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1232 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1233 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1234 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1235 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1236 netdev->dev_addr);
Emil Tantilov675ad472010-04-27 14:02:58 +00001237
Jesse Brandeburgeb62efd2009-04-17 20:44:36 +00001238 /* carrier off reporting is important to ethtool even BEFORE open */
1239 netif_carrier_off(netdev);
1240
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001241 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242
1243 cards_found++;
1244 return 0;
1245
1246err_register:
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001247err_eeprom:
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001248 e1000_phy_hw_reset(hw);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001249
Joe Perches1dc32912008-07-11 15:17:08 -07001250 if (hw->flash_address)
1251 iounmap(hw->flash_address);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001252 kfree(adapter->tx_ring);
1253 kfree(adapter->rx_ring);
Jesse Brandeburge508be12010-09-07 21:01:12 +00001254err_dma:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255err_sw_init:
Dirk Brandewie5377a412011-01-06 14:29:54 +00001256err_mdio_ioremap:
Florian Fainelli13acde82012-01-04 20:23:35 +00001257 iounmap(hw->ce4100_gbe_mdio_base_virt);
Joe Perches1dc32912008-07-11 15:17:08 -07001258 iounmap(hw->hw_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259err_ioremap:
1260 free_netdev(netdev);
1261err_alloc_etherdev:
Taku Izumi81250292008-07-11 15:17:44 -07001262 pci_release_selected_regions(pdev, bars);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001263err_pci_reg:
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001264 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 return err;
1266}
1267
1268/**
1269 * e1000_remove - Device Removal Routine
1270 * @pdev: PCI device information struct
1271 *
1272 * e1000_remove is called by the PCI subsystem to alert the driver
1273 * that it should release a PCI device. The could be caused by a
1274 * Hot-Plug event, or because the driver is going to be removed from
1275 * memory.
1276 **/
1277
Joe Perches64798842008-07-11 15:17:02 -07001278static void __devexit e1000_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279{
1280 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07001281 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07001282 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001284 e1000_down_and_stop(adapter);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05001285 e1000_release_manageability(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001287 unregister_netdev(netdev);
1288
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001289 e1000_phy_hw_reset(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001291 kfree(adapter->tx_ring);
1292 kfree(adapter->rx_ring);
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001293
Florian Fainelli1c267502012-01-04 20:23:34 +00001294 if (hw->mac_type == e1000_ce4100)
Florian Fainelli13acde82012-01-04 20:23:35 +00001295 iounmap(hw->ce4100_gbe_mdio_base_virt);
Joe Perches1dc32912008-07-11 15:17:08 -07001296 iounmap(hw->hw_addr);
1297 if (hw->flash_address)
1298 iounmap(hw->flash_address);
Taku Izumi81250292008-07-11 15:17:44 -07001299 pci_release_selected_regions(pdev, adapter->bars);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300
1301 free_netdev(netdev);
1302
1303 pci_disable_device(pdev);
1304}
1305
1306/**
1307 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1308 * @adapter: board private structure to initialize
1309 *
1310 * e1000_sw_init initializes the Adapter private data structure.
Jesse Brandeburge508be12010-09-07 21:01:12 +00001311 * e1000_init_hw_struct MUST be called before this function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 **/
1313
Joe Perches64798842008-07-11 15:17:02 -07001314static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315{
Auke Kokeb0f8052006-07-14 16:14:48 -07001316 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001318 adapter->num_tx_queues = 1;
1319 adapter->num_rx_queues = 1;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001320
1321 if (e1000_alloc_queues(adapter)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001322 e_err(probe, "Unable to allocate memory for queues\n");
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001323 return -ENOMEM;
1324 }
1325
Herbert Xu47313052007-05-29 15:07:31 -07001326 /* Explicitly disable IRQ since the NIC can be in any state. */
Herbert Xu47313052007-05-29 15:07:31 -07001327 e1000_irq_disable(adapter);
1328
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 spin_lock_init(&adapter->stats_lock);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00001330 mutex_init(&adapter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
Auke Kok1314bbf2006-09-27 12:54:02 -07001332 set_bit(__E1000_DOWN, &adapter->flags);
1333
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 return 0;
1335}
1336
1337/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001338 * e1000_alloc_queues - Allocate memory for all rings
1339 * @adapter: board private structure to initialize
1340 *
1341 * We allocate one ring per queue at run-time since we don't know the
Wang Chen3e1d7cd2008-12-03 22:07:10 -08001342 * number of queues at compile-time.
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001343 **/
1344
Joe Perches64798842008-07-11 15:17:02 -07001345static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001346{
Yan Burman1c7e5b12007-03-06 08:58:04 -08001347 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1348 sizeof(struct e1000_tx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001349 if (!adapter->tx_ring)
1350 return -ENOMEM;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001351
Yan Burman1c7e5b12007-03-06 08:58:04 -08001352 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1353 sizeof(struct e1000_rx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001354 if (!adapter->rx_ring) {
1355 kfree(adapter->tx_ring);
1356 return -ENOMEM;
1357 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001358
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001359 return E1000_SUCCESS;
1360}
1361
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* carrier stays off until the watchdog sees link */
	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* unwind in the reverse order of the setup above */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1439
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* close must never race with a reset in progress */
	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}
1475
1476/**
1477 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1478 * @adapter: address of board private structure
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001479 * @start: address of beginning of memory
1480 * @len: length of memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 **/
Joe Perches64798842008-07-11 15:17:02 -07001482static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1483 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484{
Joe Perches1dc32912008-07-11 15:17:08 -07001485 struct e1000_hw *hw = &adapter->hw;
Joe Perchese982f172008-07-11 15:17:18 -07001486 unsigned long begin = (unsigned long)start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 unsigned long end = begin + len;
1488
Malli Chilakala26483452005-04-28 19:44:46 -07001489 /* First rev 82545 and 82546 need to not allow any memory
1490 * write location to cross 64k boundary due to errata 23 */
Joe Perches1dc32912008-07-11 15:17:08 -07001491 if (hw->mac_type == e1000_82545 ||
Dirk Brandewie5377a412011-01-06 14:29:54 +00001492 hw->mac_type == e1000_ce4100 ||
Joe Perches1dc32912008-07-11 15:17:08 -07001493 hw->mac_type == e1000_82546) {
Joe Perchesc3033b02008-03-21 11:06:25 -07001494 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 }
1496
Joe Perchesc3033b02008-03-21 11:06:25 -07001497 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498}
1499
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Allocates the buffer_info bookkeeping array and the DMA-coherent
 * descriptor ring, retrying once if the ring lands across a 64kB
 * boundary (errata 23, see e1000_check_64k_bound).
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		/* NOTE: this label sits inside the if so the retry path
		 * below can reuse the same vfree+error exit via goto */
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous: keeping the old
		 * allocation alive forces the allocator to hand back a
		 * different (hopefully aligned) region */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1576
1577/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001578 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1579 * (Descriptors) for all queues
1580 * @adapter: board private structure
1581 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001582 * Return 0 on success, negative on failure
1583 **/
1584
Joe Perches64798842008-07-11 15:17:02 -07001585int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001586{
1587 int i, err = 0;
1588
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001589 for (i = 0; i < adapter->num_tx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001590 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1591 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001592 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001593 for (i-- ; i >= 0; i--)
1594 e1000_free_tx_resources(adapter,
1595 &adapter->tx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001596 break;
1597 }
1598 }
1599
1600 return err;
1601}
1602
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset: descriptor ring
 * base/length/head/tail, inter-packet gap, interrupt delays, and the
 * Transmit Control Register.  Also caches per-adapter txd_cmd flags
 * used later when building descriptors in the send path.
 **/

static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* pre-82543 parts expose TDH/TDT at different offsets */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		/* 82542 rev2 uses its own IPG values regardless of media */
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* older MACs report packet-sent status, newer report desc-written */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);

}
1694
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Allocates the buffer_info bookkeeping array and the DMA-coherent
 * descriptor ring, retrying once if the ring lands across a 64kB
 * boundary (errata 23, see e1000_check_64k_bound).
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		/* NOTE: this label sits inside the if so the retry paths
		 * below can reuse the same vfree+error exit via goto */
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous: keeping the old
		 * allocation alive forces the allocator to hand back a
		 * different (hopefully aligned) region */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
1776
1777/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001778 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1779 * (Descriptors) for all queues
1780 * @adapter: board private structure
1781 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001782 * Return 0 on success, negative on failure
1783 **/
1784
Joe Perches64798842008-07-11 15:17:02 -07001785int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001786{
1787 int i, err = 0;
1788
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001789 for (i = 0; i < adapter->num_rx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001790 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1791 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001792 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001793 for (i-- ; i >= 0; i--)
1794 e1000_free_rx_resources(adapter,
1795 &adapter->rx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001796 break;
1797 }
1798 }
1799
1800 return err;
1801}
1802
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 *
 * Builds the RCTL value from the current MTU, configured Rx buffer
 * size, TBI-compatibility state and netdev features, then writes it
 * out in a single register write.
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before re-inserting it */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI mode needs to see bad packets to apply its workaround */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long packet enable only when the MTU calls for jumbo frames */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		/* 2048 is encoded without the buffer-size-extension bit */
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1868
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.  Selects the jumbo or
 * standard receive paths based on MTU, programs the interrupt-moderation
 * timers and the descriptor ring registers, then re-enables receives.
 **/

static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	/* Pick the receive clean/alloc handlers: jumbo frames use the
	 * page-based path, standard MTU uses the skb-based path.  The
	 * descriptor length is the same either way (legacy descriptors). */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
		       sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
		       sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* 82540 and later also have an absolute delay timer and ITR.
	 * ITR is programmed in 256ns units, hence the 1000000000/256 scale. */
	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		/* pre-82543 parts expose head/tail at different offsets */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
1937
1938/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001939 * e1000_free_tx_resources - Free Tx Resources per Queue
1940 * @adapter: board private structure
1941 * @tx_ring: Tx descriptor ring for a specific queue
1942 *
1943 * Free all transmit software resources
1944 **/
1945
Joe Perches64798842008-07-11 15:17:02 -07001946static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1947 struct e1000_tx_ring *tx_ring)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001948{
1949 struct pci_dev *pdev = adapter->pdev;
1950
1951 e1000_clean_tx_ring(adapter, tx_ring);
1952
1953 vfree(tx_ring->buffer_info);
1954 tx_ring->buffer_info = NULL;
1955
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001956 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1957 tx_ring->dma);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001958
1959 tx_ring->desc = NULL;
1960}
1961
1962/**
1963 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 * @adapter: board private structure
1965 *
1966 * Free all transmit software resources
1967 **/
1968
Joe Perches64798842008-07-11 15:17:02 -07001969void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970{
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001971 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001973 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001974 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975}
1976
Joe Perches64798842008-07-11 15:17:02 -07001977static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1978 struct e1000_buffer *buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979{
Alexander Duyck602c0552009-12-02 16:46:00 +00001980 if (buffer_info->dma) {
1981 if (buffer_info->mapped_as_page)
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001982 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1983 buffer_info->length, DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001984 else
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001985 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
Alexander Duyck602c0552009-12-02 16:46:00 +00001986 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001987 DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001988 buffer_info->dma = 0;
1989 }
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001990 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 dev_kfree_skb_any(buffer_info->skb);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001992 buffer_info->skb = NULL;
1993 }
Alexander Duyck37e73df2009-03-25 21:58:45 +00001994 buffer_info->time_stamp = 0;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001995 /* buffer_info must be completely set up in the transmit path */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996}
1997
1998/**
1999 * e1000_clean_tx_ring - Free Tx Buffers
2000 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002001 * @tx_ring: ring to be cleaned
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 **/
2003
Joe Perches64798842008-07-11 15:17:02 -07002004static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2005 struct e1000_tx_ring *tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006{
Joe Perches1dc32912008-07-11 15:17:08 -07002007 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 struct e1000_buffer *buffer_info;
2009 unsigned long size;
2010 unsigned int i;
2011
2012 /* Free all the Tx ring sk_buffs */
2013
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002014 for (i = 0; i < tx_ring->count; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 buffer_info = &tx_ring->buffer_info[i];
2016 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2017 }
2018
2019 size = sizeof(struct e1000_buffer) * tx_ring->count;
2020 memset(tx_ring->buffer_info, 0, size);
2021
2022 /* Zero out the descriptor ring */
2023
2024 memset(tx_ring->desc, 0, tx_ring->size);
2025
2026 tx_ring->next_to_use = 0;
2027 tx_ring->next_to_clean = 0;
Rusty Russell3db1cd52011-12-19 13:56:45 +00002028 tx_ring->last_tx_tso = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
Joe Perches1dc32912008-07-11 15:17:08 -07002030 writel(0, hw->hw_addr + tx_ring->tdh);
2031 writel(0, hw->hw_addr + tx_ring->tdt);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002032}
2033
2034/**
2035 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2036 * @adapter: board private structure
2037 **/
2038
Joe Perches64798842008-07-11 15:17:02 -07002039static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002040{
2041 int i;
2042
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002043 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002044 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045}
2046
2047/**
2048 * e1000_free_rx_resources - Free Rx Resources
2049 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002050 * @rx_ring: ring to clean the resources from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 *
2052 * Free all receive software resources
2053 **/
2054
Joe Perches64798842008-07-11 15:17:02 -07002055static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2056 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 struct pci_dev *pdev = adapter->pdev;
2059
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002060 e1000_clean_rx_ring(adapter, rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
2062 vfree(rx_ring->buffer_info);
2063 rx_ring->buffer_info = NULL;
2064
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002065 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2066 rx_ring->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
2068 rx_ring->desc = NULL;
2069}
2070
2071/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002072 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002074 *
2075 * Free all receive software resources
2076 **/
2077
Joe Perches64798842008-07-11 15:17:02 -07002078void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002079{
2080 int i;
2081
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002082 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002083 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2084}
2085
2086/**
2087 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2088 * @adapter: board private structure
2089 * @rx_ring: ring to free buffers from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 **/
2091
Joe Perches64798842008-07-11 15:17:02 -07002092static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2093 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094{
Joe Perches1dc32912008-07-11 15:17:08 -07002095 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 struct e1000_buffer *buffer_info;
2097 struct pci_dev *pdev = adapter->pdev;
2098 unsigned long size;
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07002099 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100
2101 /* Free all the Rx ring sk_buffs */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002102 for (i = 0; i < rx_ring->count; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 buffer_info = &rx_ring->buffer_info[i];
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002104 if (buffer_info->dma &&
2105 adapter->clean_rx == e1000_clean_rx_irq) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002106 dma_unmap_single(&pdev->dev, buffer_info->dma,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002107 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002108 DMA_FROM_DEVICE);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002109 } else if (buffer_info->dma &&
2110 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002111 dma_unmap_page(&pdev->dev, buffer_info->dma,
2112 buffer_info->length,
2113 DMA_FROM_DEVICE);
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00002114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00002116 buffer_info->dma = 0;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002117 if (buffer_info->page) {
2118 put_page(buffer_info->page);
2119 buffer_info->page = NULL;
2120 }
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00002121 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 dev_kfree_skb(buffer_info->skb);
2123 buffer_info->skb = NULL;
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08002124 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 }
2126
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00002127 /* there also may be some cached data from a chained receive */
2128 if (rx_ring->rx_skb_top) {
2129 dev_kfree_skb(rx_ring->rx_skb_top);
2130 rx_ring->rx_skb_top = NULL;
2131 }
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 size = sizeof(struct e1000_buffer) * rx_ring->count;
2134 memset(rx_ring->buffer_info, 0, size);
2135
2136 /* Zero out the descriptor ring */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 memset(rx_ring->desc, 0, rx_ring->size);
2138
2139 rx_ring->next_to_clean = 0;
2140 rx_ring->next_to_use = 0;
2141
Joe Perches1dc32912008-07-11 15:17:08 -07002142 writel(0, hw->hw_addr + rx_ring->rdh);
2143 writel(0, hw->hw_addr + rx_ring->rdt);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002144}
2145
2146/**
2147 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2148 * @adapter: board private structure
2149 **/
2150
Joe Perches64798842008-07-11 15:17:02 -07002151static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002152{
2153 int i;
2154
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002155 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002156 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157}
2158
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* MWI must be off while the receive unit is held in reset */
	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the hardware time to complete the reset */
	mdelay(5);

	/* any buffers the hardware owned are invalid after the reset */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2179
/* Counterpart to e1000_enter_82542_rst(): take the receive unit out of
 * reset, restore MWI if the PCI command word had it enabled, and bring
 * the receive path back up.
 */
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the hardware time to come out of reset */
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		/* reprogram the Rx unit and repopulate the (emptied) ring */
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2202
2203/**
2204 * e1000_set_mac - Change the Ethernet Address of the NIC
2205 * @netdev: network interface device structure
2206 * @p: pointer to an address structure
2207 *
2208 * Returns 0 on success, negative on failure
2209 **/
2210
Joe Perches64798842008-07-11 15:17:02 -07002211static int e1000_set_mac(struct net_device *netdev, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212{
Malli Chilakala60490fe2005-06-17 17:41:45 -07002213 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07002214 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 struct sockaddr *addr = p;
2216
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002217 if (!is_valid_ether_addr(addr->sa_data))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 return -EADDRNOTAVAIL;
2219
2220 /* 82542 2.0 needs to be in reset to write receive address registers */
2221
Joe Perches1dc32912008-07-11 15:17:08 -07002222 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 e1000_enter_82542_rst(adapter);
2224
2225 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Joe Perches1dc32912008-07-11 15:17:08 -07002226 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Joe Perches1dc32912008-07-11 15:17:08 -07002228 e1000_rar_set(hw, hw->mac_addr, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
Joe Perches1dc32912008-07-11 15:17:08 -07002230 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 e1000_leave_82542_rst(adapter);
2232
2233 return 0;
2234}
2235
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	/* shadow of the multicast hash table, written to HW in one pass;
	 * GFP_ATOMIC because set_rx_mode may be called in atomic context */
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	/* If the unicast list does not fit in the exact-match registers
	 * (RAR 0 is reserved for the station address), fall back to
	 * unicast promiscuous mode instead of filtering in hardware. */
	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	/* multicast addresses fill the remaining exact-match slots; any
	 * overflow is accumulated into the software hash-table shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	/* clear any exact-match registers left over from a previous,
	 * longer address list */
	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/*
		 * The 82544 has an errata where writing odd
		 * offsets overwrites the previous even offset; writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
2348
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);
	/* bail out if the interface is being taken down */
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	/* snapshot the PHY registers into the adapter's cached phy_info */
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	mutex_unlock(&adapter->mutex);
}
2367
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 *
 * Works around the 82547 Tx FIFO stall errata: once the hardware has
 * drained (TDT==TDH and the FIFO head/tail/save registers agree), the
 * transmitter is briefly disabled, the FIFO pointers are reset to the
 * cached head address, and the queue is woken.  If the FIFO has not
 * drained yet, the task reschedules itself.
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	/* bail out if the interface is being taken down */
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* only safe to reset the FIFO once it has fully drained */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			/* disable transmits while rewriting FIFO pointers */
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet: retry on the next jiffy */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
	mutex_unlock(&adapter->mutex);
}
2406
Nick Nunleyb5481922010-02-03 14:49:28 +00002407bool e1000_has_link(struct e1000_adapter *adapter)
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002408{
2409 struct e1000_hw *hw = &adapter->hw;
2410 bool link_active = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002411
Nicolas Schichan6d9e5132011-07-09 00:24:18 +00002412 /* get_link_status is set on LSC (link status) interrupt or rx
2413 * sequence error interrupt (except on intel ce4100).
2414 * get_link_status will stay false until the
2415 * e1000_check_for_link establishes link for copper adapters
2416 * ONLY
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002417 */
2418 switch (hw->media_type) {
2419 case e1000_media_type_copper:
Nicolas Schichan6d9e5132011-07-09 00:24:18 +00002420 if (hw->mac_type == e1000_ce4100)
2421 hw->get_link_status = 1;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002422 if (hw->get_link_status) {
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002423 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002424 link_active = !hw->get_link_status;
2425 } else {
2426 link_active = true;
2427 }
2428 break;
2429 case e1000_media_type_fiber:
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002430 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002431 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2432 break;
2433 case e1000_media_type_internal_serdes:
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002434 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002435 link_active = hw->serdes_has_link;
2436 break;
2437 default:
2438 break;
2439 }
2440
2441 return link_active;
2442}
2443
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444/**
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002445 * e1000_watchdog - work function
2446 * @work: work struct contained inside adapter struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 **/
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002448static void e1000_watchdog(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449{
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002450 struct e1000_adapter *adapter = container_of(work,
2451 struct e1000_adapter,
2452 watchdog_task.work);
Joe Perches1dc32912008-07-11 15:17:08 -07002453 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 struct net_device *netdev = adapter->netdev;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002455 struct e1000_tx_ring *txdr = adapter->tx_ring;
Joe Perches406874a2008-04-03 10:06:32 -07002456 u32 link, tctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002458 if (test_bit(__E1000_DOWN, &adapter->flags))
2459 return;
2460
2461 mutex_lock(&adapter->mutex);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002462 link = e1000_has_link(adapter);
2463 if ((netif_carrier_ok(netdev)) && link)
2464 goto link_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002466 if (link) {
2467 if (!netif_carrier_ok(netdev)) {
Joe Perches406874a2008-04-03 10:06:32 -07002468 u32 ctrl;
Joe Perchesc3033b02008-03-21 11:06:25 -07002469 bool txb2b = true;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002470 /* update snapshot of PHY registers on LSC */
Joe Perches1dc32912008-07-11 15:17:08 -07002471 e1000_get_speed_and_duplex(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 &adapter->link_speed,
2473 &adapter->link_duplex);
2474
Joe Perches1dc32912008-07-11 15:17:08 -07002475 ctrl = er32(CTRL);
Emil Tantilov675ad472010-04-27 14:02:58 +00002476 pr_info("%s NIC Link is Up %d Mbps %s, "
2477 "Flow Control: %s\n",
2478 netdev->name,
2479 adapter->link_speed,
2480 adapter->link_duplex == FULL_DUPLEX ?
2481 "Full Duplex" : "Half Duplex",
2482 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2483 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2484 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2485 E1000_CTRL_TFCE) ? "TX" : "None")));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486
Emil Tantilov39ca5f02010-03-26 11:25:58 +00002487 /* adjust timeout factor according to speed/duplex */
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002488 adapter->tx_timeout_factor = 1;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002489 switch (adapter->link_speed) {
2490 case SPEED_10:
Joe Perchesc3033b02008-03-21 11:06:25 -07002491 txb2b = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002492 adapter->tx_timeout_factor = 16;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002493 break;
2494 case SPEED_100:
Joe Perchesc3033b02008-03-21 11:06:25 -07002495 txb2b = false;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002496 /* maybe add some timeout factor ? */
2497 break;
2498 }
2499
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002500 /* enable transmits in the hardware */
Joe Perches1dc32912008-07-11 15:17:08 -07002501 tctl = er32(TCTL);
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002502 tctl |= E1000_TCTL_EN;
Joe Perches1dc32912008-07-11 15:17:08 -07002503 ew32(TCTL, tctl);
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002504
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 netif_carrier_on(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002506 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002507 schedule_delayed_work(&adapter->phy_info_task,
2508 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 adapter->smartspeed = 0;
2510 }
2511 } else {
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002512 if (netif_carrier_ok(netdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 adapter->link_speed = 0;
2514 adapter->link_duplex = 0;
Emil Tantilov675ad472010-04-27 14:02:58 +00002515 pr_info("%s NIC Link is Down\n",
2516 netdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 netif_carrier_off(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002518
2519 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002520 schedule_delayed_work(&adapter->phy_info_task,
2521 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 }
2523
2524 e1000_smartspeed(adapter);
2525 }
2526
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002527link_up:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 e1000_update_stats(adapter);
2529
Joe Perches1dc32912008-07-11 15:17:08 -07002530 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 adapter->tpt_old = adapter->stats.tpt;
Joe Perches1dc32912008-07-11 15:17:08 -07002532 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 adapter->colc_old = adapter->stats.colc;
2534
2535 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2536 adapter->gorcl_old = adapter->stats.gorcl;
2537 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2538 adapter->gotcl_old = adapter->stats.gotcl;
2539
Joe Perches1dc32912008-07-11 15:17:08 -07002540 e1000_update_adaptive(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002542 if (!netif_carrier_ok(netdev)) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002543 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 /* We've lost link, so the controller stops DMA,
2545 * but we've got queued Tx work that's never going
2546 * to get done, so reset controller to flush Tx.
2547 * (Do the reset outside of interrupt context). */
Jeff Kirsher87041632006-03-02 18:21:24 -08002548 adapter->tx_timeout_count++;
2549 schedule_work(&adapter->reset_task);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002550 /* exit immediately since reset is imminent */
2551 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 }
2553 }
2554
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002555 /* Simple mode for Interrupt Throttle Rate (ITR) */
2556 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2557 /*
2558 * Symmetric Tx/Rx gets a reduced ITR=2000;
2559 * Total asymmetrical Tx or Rx gets ITR=8000;
2560 * everyone else is between 2000-8000.
2561 */
2562 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2563 u32 dif = (adapter->gotcl > adapter->gorcl ?
2564 adapter->gotcl - adapter->gorcl :
2565 adapter->gorcl - adapter->gotcl) / 10000;
2566 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2567
2568 ew32(ITR, 1000000000 / (itr * 256));
2569 }
2570
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 /* Cause software interrupt to ensure rx ring is cleaned */
Joe Perches1dc32912008-07-11 15:17:08 -07002572 ew32(ICS, E1000_ICS_RXDMT0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573
Malli Chilakala26483452005-04-28 19:44:46 -07002574 /* Force detection of hung controller every watchdog period */
Joe Perchesc3033b02008-03-21 11:06:25 -07002575 adapter->detect_tx_hung = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002577 /* Reschedule the task */
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002578 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002579 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002580
2581unlock:
2582 mutex_unlock(&adapter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583}
2584
/* Traffic classes used by the dynamic interrupt-throttle (ITR) logic.
 * e1000_update_itr() classifies each measurement interval into one of
 * these buckets, and e1000_set_itr() maps the bucket to an interrupt
 * rate (lowest_latency -> 70000, low_latency -> 20000, bulk_latency ->
 * 4000 ints/s).
 */
enum latency_range {
	lowest_latency = 0,	/* small/interactive traffic, highest int rate */
	low_latency = 1,	/* ~50 usec between interrupts (20000 ints/s) */
	bulk_latency = 2,	/* ~250 usec between interrupts (4000 ints/s) */
	latency_invalid = 255	/* out-of-band marker, not a real class */
};
2591
2592/**
2593 * e1000_update_itr - update the dynamic ITR value based on statistics
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00002594 * @adapter: pointer to adapter
2595 * @itr_setting: current adapter->itr
2596 * @packets: the number of packets during this measurement interval
2597 * @bytes: the number of bytes during this measurement interval
2598 *
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002599 * Stores a new ITR value based on packets and byte
2600 * counts during the last interrupt. The advantage of per interrupt
2601 * computation is faster updates and more accurate ITR for the current
2602 * traffic pattern. Constants in this function were computed
2603 * based on theoretical maximum wire speed and thresholds were set based
2604 * on testing data as well as attempting to minimize response time
2605 * while increasing bulk throughput.
2606 * this functionality is controlled by the InterruptThrottleRate module
2607 * parameter (see e1000_param.c)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002608 **/
2609static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
Joe Perches64798842008-07-11 15:17:02 -07002610 u16 itr_setting, int packets, int bytes)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002611{
2612 unsigned int retval = itr_setting;
2613 struct e1000_hw *hw = &adapter->hw;
2614
2615 if (unlikely(hw->mac_type < e1000_82540))
2616 goto update_itr_done;
2617
2618 if (packets == 0)
2619 goto update_itr_done;
2620
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002621 switch (itr_setting) {
2622 case lowest_latency:
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002623 /* jumbo frames get bulk treatment*/
2624 if (bytes/packets > 8000)
2625 retval = bulk_latency;
2626 else if ((packets < 5) && (bytes > 512))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002627 retval = low_latency;
2628 break;
2629 case low_latency: /* 50 usec aka 20000 ints/s */
2630 if (bytes > 10000) {
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002631 /* jumbo frames need bulk latency setting */
2632 if (bytes/packets > 8000)
2633 retval = bulk_latency;
2634 else if ((packets < 10) || ((bytes/packets) > 1200))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002635 retval = bulk_latency;
2636 else if ((packets > 35))
2637 retval = lowest_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002638 } else if (bytes/packets > 2000)
2639 retval = bulk_latency;
2640 else if (packets <= 2 && bytes < 512)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002641 retval = lowest_latency;
2642 break;
2643 case bulk_latency: /* 250 usec aka 4000 ints/s */
2644 if (bytes > 25000) {
2645 if (packets > 35)
2646 retval = low_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002647 } else if (bytes < 6000) {
2648 retval = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002649 }
2650 break;
2651 }
2652
2653update_itr_done:
2654 return retval;
2655}
2656
2657static void e1000_set_itr(struct e1000_adapter *adapter)
2658{
2659 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07002660 u16 current_itr;
2661 u32 new_itr = adapter->itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002662
2663 if (unlikely(hw->mac_type < e1000_82540))
2664 return;
2665
2666 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2667 if (unlikely(adapter->link_speed != SPEED_1000)) {
2668 current_itr = 0;
2669 new_itr = 4000;
2670 goto set_itr_now;
2671 }
2672
2673 adapter->tx_itr = e1000_update_itr(adapter,
2674 adapter->tx_itr,
2675 adapter->total_tx_packets,
2676 adapter->total_tx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002677 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2678 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2679 adapter->tx_itr = low_latency;
2680
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002681 adapter->rx_itr = e1000_update_itr(adapter,
2682 adapter->rx_itr,
2683 adapter->total_rx_packets,
2684 adapter->total_rx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002685 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2686 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2687 adapter->rx_itr = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002688
2689 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2690
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002691 switch (current_itr) {
2692 /* counts and packets in update_itr are dependent on these numbers */
2693 case lowest_latency:
2694 new_itr = 70000;
2695 break;
2696 case low_latency:
2697 new_itr = 20000; /* aka hwitr = ~200 */
2698 break;
2699 case bulk_latency:
2700 new_itr = 4000;
2701 break;
2702 default:
2703 break;
2704 }
2705
2706set_itr_now:
2707 if (new_itr != adapter->itr) {
2708 /* this attempts to bias the interrupt rate towards Bulk
2709 * by adding intermediate steps when interrupt rate is
2710 * increasing */
2711 new_itr = new_itr > adapter->itr ?
2712 min(adapter->itr + (new_itr >> 2), new_itr) :
2713 new_itr;
2714 adapter->itr = new_itr;
Joe Perches1dc32912008-07-11 15:17:08 -07002715 ew32(ITR, 1000000000 / (new_itr * 256));
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002716 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002717}
2718
/* Per-packet tx_flags bits assembled in e1000_xmit_frame() and consumed
 * by the descriptor-building helpers below (see e1000_tx_queue() for
 * the mapping onto hardware descriptor command/option bits).
 */
#define E1000_TX_FLAGS_CSUM		0x00000001	/* insert L4 checksum (TXSM) */
#define E1000_TX_FLAGS_VLAN		0x00000002	/* insert VLAN tag (VLE) */
#define E1000_TX_FLAGS_TSO		0x00000004	/* TCP segmentation offload (TSE) */
#define E1000_TX_FLAGS_IPV4		0x00000008	/* insert IPv4 header checksum (IXSM) */
#define E1000_TX_FLAGS_NO_FCS		0x00000010	/* do not append Ethernet FCS (clears IFCS) */
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000	/* VLAN tag lives in the upper 16 bits */
#define E1000_TX_FLAGS_VLAN_SHIFT	16
2726
Joe Perches64798842008-07-11 15:17:02 -07002727static int e1000_tso(struct e1000_adapter *adapter,
2728 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 struct e1000_context_desc *context_desc;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002731 struct e1000_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002733 u32 cmd_length = 0;
2734 u16 ipcse = 0, tucse, mss;
2735 u8 ipcss, ipcso, tucss, tucso, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 int err;
2737
Herbert Xu89114af2006-07-08 13:34:32 -07002738 if (skb_is_gso(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 if (skb_header_cloned(skb)) {
2740 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2741 if (err)
2742 return err;
2743 }
2744
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07002745 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
Herbert Xu79671682006-06-22 02:40:14 -07002746 mss = skb_shinfo(skb)->gso_size;
Alexey Dobriyan60828232006-05-23 14:52:21 -07002747 if (skb->protocol == htons(ETH_P_IP)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002748 struct iphdr *iph = ip_hdr(skb);
2749 iph->tot_len = 0;
2750 iph->check = 0;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002751 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2752 iph->daddr, 0,
2753 IPPROTO_TCP,
2754 0);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002755 cmd_length = E1000_TXD_CMD_IP;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07002756 ipcse = skb_transport_offset(skb) - 1;
Auke Koke15fdd02006-08-16 11:28:45 -07002757 } else if (skb->protocol == htons(ETH_P_IPV6)) {
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07002758 ipv6_hdr(skb)->payload_len = 0;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002759 tcp_hdr(skb)->check =
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07002760 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2761 &ipv6_hdr(skb)->daddr,
2762 0, IPPROTO_TCP, 0);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002763 ipcse = 0;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002764 }
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002765 ipcss = skb_network_offset(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002766 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07002767 tucss = skb_transport_offset(skb);
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002768 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 tucse = 0;
2770
2771 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002772 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002774 i = tx_ring->next_to_use;
2775 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002776 buffer_info = &tx_ring->buffer_info[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777
2778 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2779 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2780 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2781 context_desc->upper_setup.tcp_fields.tucss = tucss;
2782 context_desc->upper_setup.tcp_fields.tucso = tucso;
2783 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2784 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2785 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2786 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2787
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002788 buffer_info->time_stamp = jiffies;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08002789 buffer_info->next_to_watch = i;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002790
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002791 if (++i == tx_ring->count) i = 0;
2792 tx_ring->next_to_use = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793
Joe Perchesc3033b02008-03-21 11:06:25 -07002794 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 }
Joe Perchesc3033b02008-03-21 11:06:25 -07002796 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797}
2798
Joe Perches64798842008-07-11 15:17:02 -07002799static bool e1000_tx_csum(struct e1000_adapter *adapter,
2800 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801{
2802 struct e1000_context_desc *context_desc;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002803 struct e1000_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002805 u8 css;
Dave Graham3ed30672008-10-09 14:29:26 -07002806 u32 cmd_len = E1000_TXD_CMD_DEXT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807
Dave Graham3ed30672008-10-09 14:29:26 -07002808 if (skb->ip_summed != CHECKSUM_PARTIAL)
2809 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810
Dave Graham3ed30672008-10-09 14:29:26 -07002811 switch (skb->protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08002812 case cpu_to_be16(ETH_P_IP):
Dave Graham3ed30672008-10-09 14:29:26 -07002813 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2814 cmd_len |= E1000_TXD_CMD_TCP;
2815 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08002816 case cpu_to_be16(ETH_P_IPV6):
Dave Graham3ed30672008-10-09 14:29:26 -07002817 /* XXX not handling all IPV6 headers */
2818 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2819 cmd_len |= E1000_TXD_CMD_TCP;
2820 break;
2821 default:
2822 if (unlikely(net_ratelimit()))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07002823 e_warn(drv, "checksum_partial proto=%x!\n",
2824 skb->protocol);
Dave Graham3ed30672008-10-09 14:29:26 -07002825 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 }
2827
Michał Mirosław0d0b1672010-12-14 15:24:08 +00002828 css = skb_checksum_start_offset(skb);
Dave Graham3ed30672008-10-09 14:29:26 -07002829
2830 i = tx_ring->next_to_use;
2831 buffer_info = &tx_ring->buffer_info[i];
2832 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2833
2834 context_desc->lower_setup.ip_config = 0;
2835 context_desc->upper_setup.tcp_fields.tucss = css;
2836 context_desc->upper_setup.tcp_fields.tucso =
2837 css + skb->csum_offset;
2838 context_desc->upper_setup.tcp_fields.tucse = 0;
2839 context_desc->tcp_seg_setup.data = 0;
2840 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2841
2842 buffer_info->time_stamp = jiffies;
2843 buffer_info->next_to_watch = i;
2844
2845 if (unlikely(++i == tx_ring->count)) i = 0;
2846 tx_ring->next_to_use = i;
2847
2848 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849}
2850
2851#define E1000_MAX_TXD_PWR 12
2852#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2853
/**
 * e1000_tx_map - DMA-map a packet into the Tx ring's buffer_info array
 * @adapter: board private structure
 * @tx_ring: ring whose buffer_info slots are filled
 * @skb: packet whose linear data and page fragments are mapped
 * @first: ring index of the first slot used for this packet
 * @max_per_txd: largest number of bytes one descriptor may carry
 * @nr_frags: number of paged fragments in @skb
 * @mss: TSO segment size (0 when the packet is not TSO)
 *
 * Splits the packet into descriptor-sized chunks, applying several
 * hardware errata workarounds that shave or clamp chunk sizes, and
 * DMA-maps each chunk.  Returns the number of slots consumed, or 0 if
 * a DMA mapping failed (mappings made so far are unwound).
 */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* first map the linear portion of the skb in max_per_txd chunks */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		/* only advance while more linear data remains; the frag
		 * loop below pre-increments, so i must still point at the
		 * last slot written here */
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	/* now map each paged fragment, again in max_per_txd chunks */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* the last slot owns the skb and the accounting totals;
	 * the first slot's next_to_watch links to the last for cleanup */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	/* the failing slot was never mapped; clear its stale dma value */
	buffer_info->dma = 0;
	/* NOTE(review): i points at the failing (unmapped) slot here; the
	 * initial count-- plus the while(count--) walk looks like it may
	 * unmap one slot fewer than were mapped — worth auditing against
	 * the pre-unsigned-fix unwind logic. */
	if (count)
		count--;

	while (count--) {
		if (i==0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
2995
/**
 * e1000_tx_queue - write Tx descriptors for previously-mapped buffers
 * @adapter: board private structure
 * @tx_ring: ring to fill
 * @tx_flags: E1000_TX_FLAGS_* bits describing offloads for this packet
 * @count: number of buffer_info slots to consume (e1000_tx_map() return)
 *
 * Translates tx_flags into descriptor command/option bits, writes one
 * descriptor per mapped buffer, and finally advances the ring tail
 * register so the hardware starts fetching.  The wmb()/writel() order
 * at the end is load-bearing — do not reorder.
 */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		/* TSO packets may additionally request IPv4 header csum */
		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	/* every descriptor of the packet carries the same upper/lower
	 * command bits; only the buffer address and length differ */
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	/* extra command bits (presumably EOP/report-status — set where
	 * adapter->txd_cmd is built) go on the final descriptor only */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
3058
3059/**
3060 * 82547 workaround to avoid controller hang in half-duplex environment.
3061 * The workaround is to avoid queuing a large packet that would span
3062 * the internal Tx FIFO ring boundary by notifying the stack to resend
3063 * the packet at a later time. This gives the Tx FIFO an opportunity to
3064 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3065 * to the beginning of the Tx FIFO.
3066 **/
3067
3068#define E1000_FIFO_HDR 0x10
3069#define E1000_82547_PAD_LEN 0x3E0
3070
Joe Perches64798842008-07-11 15:17:02 -07003071static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3072 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073{
Joe Perches406874a2008-04-03 10:06:32 -07003074 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3075 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07003077 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003079 if (adapter->link_duplex != HALF_DUPLEX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 goto no_fifo_stall_required;
3081
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003082 if (atomic_read(&adapter->tx_fifo_stall))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 return 1;
3084
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003085 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 atomic_set(&adapter->tx_fifo_stall, 1);
3087 return 1;
3088 }
3089
3090no_fifo_stall_required:
3091 adapter->tx_fifo_head += skb_fifo_len;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003092 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3094 return 0;
3095}
3096
/**
 * __e1000_maybe_stop_tx - slow path of e1000_maybe_stop_tx()
 * @netdev: network interface device structure
 * @size: number of free descriptors the caller needs
 *
 * Stops the transmit queue, then re-checks the free-descriptor count
 * under a full memory barrier so a Tx-cleanup racing on another CPU
 * cannot leave the queue stopped forever.  Returns -EBUSY when the
 * queue stays stopped, 0 when room appeared and the queue restarted.
 * The stop/barrier/recheck order must not change.
 */
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}
3118
/* Ensure at least @size descriptors are free before queuing a packet,
 * falling through to the slow path (which stops the queue) when they
 * are not.  Returns 0 when transmission may proceed, -EBUSY otherwise.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);

	/* common case: plenty of room in the ring */
	return 0;
}
3126
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003128static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3129 struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003131 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07003132 struct e1000_hw *hw = &adapter->hw;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003133 struct e1000_tx_ring *tx_ring;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3135 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3136 unsigned int tx_flags = 0;
Eric Dumazete743d312010-04-14 15:59:40 -07003137 unsigned int len = skb_headlen(skb);
Krishna Kumar6d1e3aa2007-10-05 14:15:16 -07003138 unsigned int nr_frags;
3139 unsigned int mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 int count = 0;
Auke Kok76c224b2006-05-23 13:36:06 -07003141 int tso;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 unsigned int f;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003144 /* This goes back to the question of how to logically map a tx queue
3145 * to a flow. Right now, performance is impacted slightly negatively
3146 * if using multiple tx queues. If the stack breaks away from a
3147 * single qdisc implementation, we can look at this again. */
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003148 tx_ring = adapter->tx_ring;
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04003149
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003150 if (unlikely(skb->len <= 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 dev_kfree_skb_any(skb);
3152 return NETDEV_TX_OK;
3153 }
3154
Herbert Xu79671682006-06-22 02:40:14 -07003155 mss = skb_shinfo(skb)->gso_size;
Auke Kok76c224b2006-05-23 13:36:06 -07003156 /* The controller does a simple calculation to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 * make sure there is enough room in the FIFO before
3158 * initiating the DMA for each buffer. The calc is:
3159 * 4 = ceil(buffer len/mss). To make sure we don't
3160 * overrun the FIFO, adjust the max buffer len if mss
3161 * drops. */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003162 if (mss) {
Joe Perches406874a2008-04-03 10:06:32 -07003163 u8 hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 max_per_txd = min(mss << 2, max_per_txd);
3165 max_txd_pwr = fls(max_per_txd) - 1;
Jeff Kirsher9a3056d2006-01-12 16:50:23 -08003166
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07003167 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
Krishna Kumar6d1e3aa2007-10-05 14:15:16 -07003168 if (skb->data_len && hdr_len == len) {
Joe Perches1dc32912008-07-11 15:17:08 -07003169 switch (hw->mac_type) {
Jeff Kirsher9f687882006-03-02 18:20:17 -08003170 unsigned int pull_size;
Herbert Xu683a2aa2006-12-16 12:04:33 +11003171 case e1000_82544:
3172 /* Make sure we have room to chop off 4 bytes,
3173 * and that the end alignment will work out to
3174 * this hardware's requirements
3175 * NOTE: this is a TSO only workaround
3176 * if end byte alignment not correct move us
3177 * into the next dword */
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07003178 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
Herbert Xu683a2aa2006-12-16 12:04:33 +11003179 break;
3180 /* fall through */
Jeff Kirsher9f687882006-03-02 18:20:17 -08003181 pull_size = min((unsigned int)4, skb->data_len);
3182 if (!__pskb_pull_tail(skb, pull_size)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003183 e_err(drv, "__pskb_pull_tail "
3184 "failed.\n");
Jeff Kirsher9f687882006-03-02 18:20:17 -08003185 dev_kfree_skb_any(skb);
Jeff Garzik749dfc702006-03-11 13:35:31 -05003186 return NETDEV_TX_OK;
Jeff Kirsher9f687882006-03-02 18:20:17 -08003187 }
Eric Dumazete743d312010-04-14 15:59:40 -07003188 len = skb_headlen(skb);
Jeff Kirsher9f687882006-03-02 18:20:17 -08003189 break;
3190 default:
3191 /* do nothing */
3192 break;
Jeff Kirsherd74bbd32006-01-12 16:51:07 -08003193 }
Jeff Kirsher9a3056d2006-01-12 16:50:23 -08003194 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 }
3196
Jeff Kirsher9a3056d2006-01-12 16:50:23 -08003197 /* reserve a descriptor for the offload context */
Patrick McHardy84fa7932006-08-29 16:44:56 -07003198 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 count++;
Malli Chilakala26483452005-04-28 19:44:46 -07003200 count++;
Jeff Kirsherfd803242005-12-13 00:06:22 -05003201
Jeff Kirsherfd803242005-12-13 00:06:22 -05003202 /* Controller Erratum workaround */
Herbert Xu89114af2006-07-08 13:34:32 -07003203 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
Jeff Kirsherfd803242005-12-13 00:06:22 -05003204 count++;
Jeff Kirsherfd803242005-12-13 00:06:22 -05003205
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 count += TXD_USE_COUNT(len, max_txd_pwr);
3207
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003208 if (adapter->pcix_82544)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 count++;
3210
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003211 /* work-around for errata 10 and it applies to all controllers
Malli Chilakala97338bd2005-04-28 19:41:46 -07003212 * in PCI-X mode, so add one more descriptor to the count
3213 */
Joe Perches1dc32912008-07-11 15:17:08 -07003214 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
Malli Chilakala97338bd2005-04-28 19:41:46 -07003215 (len > 2015)))
3216 count++;
3217
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218 nr_frags = skb_shinfo(skb)->nr_frags;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003219 for (f = 0; f < nr_frags; f++)
Eric Dumazet9e903e02011-10-18 21:00:24 +00003220 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 max_txd_pwr);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003222 if (adapter->pcix_82544)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223 count += nr_frags;
3224
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 /* need: count + 2 desc gap to keep tail from touching
3226 * head, otherwise try next time */
Alexander Duyck80179432009-01-21 14:42:47 -08003227 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 return NETDEV_TX_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003230 if (unlikely((hw->mac_type == e1000_82547) &&
3231 (e1000_82547_fifo_workaround(adapter, skb)))) {
3232 netif_stop_queue(netdev);
3233 if (!test_bit(__E1000_DOWN, &adapter->flags))
3234 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3235 return NETDEV_TX_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 }
3237
Jiri Pirko5622e402011-07-21 03:26:31 +00003238 if (vlan_tx_tag_present(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 tx_flags |= E1000_TX_FLAGS_VLAN;
3240 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3241 }
3242
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003243 first = tx_ring->next_to_use;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003244
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003245 tso = e1000_tso(adapter, tx_ring, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 if (tso < 0) {
3247 dev_kfree_skb_any(skb);
3248 return NETDEV_TX_OK;
3249 }
3250
Jeff Kirsherfd803242005-12-13 00:06:22 -05003251 if (likely(tso)) {
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00003252 if (likely(hw->mac_type != e1000_82544))
Rusty Russell3db1cd52011-12-19 13:56:45 +00003253 tx_ring->last_tx_tso = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 tx_flags |= E1000_TX_FLAGS_TSO;
Jeff Kirsherfd803242005-12-13 00:06:22 -05003255 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 tx_flags |= E1000_TX_FLAGS_CSUM;
3257
Alexey Dobriyan60828232006-05-23 14:52:21 -07003258 if (likely(skb->protocol == htons(ETH_P_IP)))
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003259 tx_flags |= E1000_TX_FLAGS_IPV4;
3260
Ben Greear11a78dc2012-02-11 15:40:01 +00003261 if (unlikely(skb->no_fcs))
3262 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3263
Alexander Duyck37e73df2009-03-25 21:58:45 +00003264 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3265 nr_frags, mss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266
Alexander Duyck37e73df2009-03-25 21:58:45 +00003267 if (count) {
Willem de Bruijneab467f2012-04-27 09:04:04 +00003268 skb_tx_timestamp(skb);
3269
Alexander Duyck37e73df2009-03-25 21:58:45 +00003270 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
Alexander Duyck37e73df2009-03-25 21:58:45 +00003271 /* Make sure there is space in the ring for the next send. */
3272 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273
Alexander Duyck37e73df2009-03-25 21:58:45 +00003274 } else {
3275 dev_kfree_skb_any(skb);
3276 tx_ring->buffer_info[first].time_stamp = 0;
3277 tx_ring->next_to_use = first;
3278 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 return NETDEV_TX_OK;
3281}
3282
Tushar Daveb04e36b2012-01-27 09:00:46 +00003283#define NUM_REGS 38 /* 1 based count */
3284static void e1000_regdump(struct e1000_adapter *adapter)
3285{
3286 struct e1000_hw *hw = &adapter->hw;
3287 u32 regs[NUM_REGS];
3288 u32 *regs_buff = regs;
3289 int i = 0;
3290
Tushar Davee29b5d82012-02-10 08:06:36 +00003291 static const char * const reg_name[] = {
3292 "CTRL", "STATUS",
3293 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3294 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3295 "TIDV", "TXDCTL", "TADV", "TARC0",
3296 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3297 "TXDCTL1", "TARC1",
3298 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3299 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3300 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
Tushar Daveb04e36b2012-01-27 09:00:46 +00003301 };
3302
3303 regs_buff[0] = er32(CTRL);
3304 regs_buff[1] = er32(STATUS);
3305
3306 regs_buff[2] = er32(RCTL);
3307 regs_buff[3] = er32(RDLEN);
3308 regs_buff[4] = er32(RDH);
3309 regs_buff[5] = er32(RDT);
3310 regs_buff[6] = er32(RDTR);
3311
3312 regs_buff[7] = er32(TCTL);
3313 regs_buff[8] = er32(TDBAL);
3314 regs_buff[9] = er32(TDBAH);
3315 regs_buff[10] = er32(TDLEN);
3316 regs_buff[11] = er32(TDH);
3317 regs_buff[12] = er32(TDT);
3318 regs_buff[13] = er32(TIDV);
3319 regs_buff[14] = er32(TXDCTL);
3320 regs_buff[15] = er32(TADV);
3321 regs_buff[16] = er32(TARC0);
3322
3323 regs_buff[17] = er32(TDBAL1);
3324 regs_buff[18] = er32(TDBAH1);
3325 regs_buff[19] = er32(TDLEN1);
3326 regs_buff[20] = er32(TDH1);
3327 regs_buff[21] = er32(TDT1);
3328 regs_buff[22] = er32(TXDCTL1);
3329 regs_buff[23] = er32(TARC1);
3330 regs_buff[24] = er32(CTRL_EXT);
3331 regs_buff[25] = er32(ERT);
3332 regs_buff[26] = er32(RDBAL0);
3333 regs_buff[27] = er32(RDBAH0);
3334 regs_buff[28] = er32(TDFH);
3335 regs_buff[29] = er32(TDFT);
3336 regs_buff[30] = er32(TDFHS);
3337 regs_buff[31] = er32(TDFTS);
3338 regs_buff[32] = er32(TDFPC);
3339 regs_buff[33] = er32(RDFH);
3340 regs_buff[34] = er32(RDFT);
3341 regs_buff[35] = er32(RDFHS);
3342 regs_buff[36] = er32(RDFTS);
3343 regs_buff[37] = er32(RDFPC);
3344
3345 pr_info("Register dump\n");
Tushar Davee29b5d82012-02-10 08:06:36 +00003346 for (i = 0; i < NUM_REGS; i++)
3347 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003348}
3349
3350/*
3351 * e1000_dump: Print registers, tx ring and rx ring
3352 */
3353static void e1000_dump(struct e1000_adapter *adapter)
3354{
3355 /* this code doesn't handle multiple rings */
3356 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3357 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3358 int i;
3359
3360 if (!netif_msg_hw(adapter))
3361 return;
3362
3363 /* Print Registers */
3364 e1000_regdump(adapter);
3365
3366 /*
3367 * transmit dump
3368 */
3369 pr_info("TX Desc ring0 dump\n");
3370
3371 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3372 *
3373 * Legacy Transmit Descriptor
3374 * +--------------------------------------------------------------+
3375 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3376 * +--------------------------------------------------------------+
3377 * 8 | Special | CSS | Status | CMD | CSO | Length |
3378 * +--------------------------------------------------------------+
3379 * 63 48 47 36 35 32 31 24 23 16 15 0
3380 *
3381 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3382 * 63 48 47 40 39 32 31 16 15 8 7 0
3383 * +----------------------------------------------------------------+
3384 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3385 * +----------------------------------------------------------------+
3386 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3387 * +----------------------------------------------------------------+
3388 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3389 *
3390 * Extended Data Descriptor (DTYP=0x1)
3391 * +----------------------------------------------------------------+
3392 * 0 | Buffer Address [63:0] |
3393 * +----------------------------------------------------------------+
3394 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3395 * +----------------------------------------------------------------+
3396 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3397 */
Tushar Davee29b5d82012-02-10 08:06:36 +00003398 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3399 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003400
3401 if (!netif_msg_tx_done(adapter))
3402 goto rx_ring_summary;
3403
3404 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3405 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3406 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
Andrei Emeltchenkodd7f5c92012-03-25 17:49:25 +00003407 struct my_u { __le64 a; __le64 b; };
Tushar Daveb04e36b2012-01-27 09:00:46 +00003408 struct my_u *u = (struct my_u *)tx_desc;
Tushar Davee29b5d82012-02-10 08:06:36 +00003409 const char *type;
3410
Tushar Daveb04e36b2012-01-27 09:00:46 +00003411 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003412 type = "NTC/U";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003413 else if (i == tx_ring->next_to_use)
Tushar Davee29b5d82012-02-10 08:06:36 +00003414 type = "NTU";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003415 else if (i == tx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003416 type = "NTC";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003417 else
Tushar Davee29b5d82012-02-10 08:06:36 +00003418 type = "";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003419
Tushar Davee29b5d82012-02-10 08:06:36 +00003420 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3421 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3422 le64_to_cpu(u->a), le64_to_cpu(u->b),
3423 (u64)buffer_info->dma, buffer_info->length,
3424 buffer_info->next_to_watch,
3425 (u64)buffer_info->time_stamp, buffer_info->skb, type);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003426 }
3427
3428rx_ring_summary:
3429 /*
3430 * receive dump
3431 */
3432 pr_info("\nRX Desc ring dump\n");
3433
3434 /* Legacy Receive Descriptor Format
3435 *
3436 * +-----------------------------------------------------+
3437 * | Buffer Address [63:0] |
3438 * +-----------------------------------------------------+
3439 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3440 * +-----------------------------------------------------+
3441 * 63 48 47 40 39 32 31 16 15 0
3442 */
Tushar Davee29b5d82012-02-10 08:06:36 +00003443 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003444
3445 if (!netif_msg_rx_status(adapter))
3446 goto exit;
3447
3448 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3449 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3450 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
Andrei Emeltchenkodd7f5c92012-03-25 17:49:25 +00003451 struct my_u { __le64 a; __le64 b; };
Tushar Daveb04e36b2012-01-27 09:00:46 +00003452 struct my_u *u = (struct my_u *)rx_desc;
Tushar Davee29b5d82012-02-10 08:06:36 +00003453 const char *type;
3454
Tushar Daveb04e36b2012-01-27 09:00:46 +00003455 if (i == rx_ring->next_to_use)
Tushar Davee29b5d82012-02-10 08:06:36 +00003456 type = "NTU";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003457 else if (i == rx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003458 type = "NTC";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003459 else
Tushar Davee29b5d82012-02-10 08:06:36 +00003460 type = "";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003461
Tushar Davee29b5d82012-02-10 08:06:36 +00003462 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3463 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3464 (u64)buffer_info->dma, buffer_info->skb, type);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003465 } /* for */
3466
3467 /* dump the descriptor caches */
3468 /* rx */
Tushar Davee29b5d82012-02-10 08:06:36 +00003469 pr_info("Rx descriptor cache in 64bit format\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003470 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
Tushar Davee29b5d82012-02-10 08:06:36 +00003471 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3472 i,
3473 readl(adapter->hw.hw_addr + i+4),
3474 readl(adapter->hw.hw_addr + i),
3475 readl(adapter->hw.hw_addr + i+12),
3476 readl(adapter->hw.hw_addr + i+8));
Tushar Daveb04e36b2012-01-27 09:00:46 +00003477 }
3478 /* tx */
Tushar Davee29b5d82012-02-10 08:06:36 +00003479 pr_info("Tx descriptor cache in 64bit format\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003480 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
Tushar Davee29b5d82012-02-10 08:06:36 +00003481 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3482 i,
3483 readl(adapter->hw.hw_addr + i+4),
3484 readl(adapter->hw.hw_addr + i),
3485 readl(adapter->hw.hw_addr + i+12),
3486 readl(adapter->hw.hw_addr + i+8));
Tushar Daveb04e36b2012-01-27 09:00:46 +00003487 }
3488exit:
3489 return;
3490}
3491
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492/**
3493 * e1000_tx_timeout - Respond to a Tx Hang
3494 * @netdev: network interface device structure
3495 **/
3496
Joe Perches64798842008-07-11 15:17:02 -07003497static void e1000_tx_timeout(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003499 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500
3501 /* Do the reset outside of interrupt context */
Jeff Kirsher87041632006-03-02 18:21:24 -08003502 adapter->tx_timeout_count++;
3503 schedule_work(&adapter->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504}
3505
Joe Perches64798842008-07-11 15:17:02 -07003506static void e1000_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507{
David Howells65f27f32006-11-22 14:55:48 +00003508 struct e1000_adapter *adapter =
3509 container_of(work, struct e1000_adapter, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00003511 if (test_bit(__E1000_DOWN, &adapter->flags))
3512 return;
Tushar Daveb04e36b2012-01-27 09:00:46 +00003513 e_err(drv, "Reset adapter\n");
Jesse Brandeburg338c15e2010-09-22 18:22:42 +00003514 e1000_reinit_safe(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515}
3516
3517/**
3518 * e1000_get_stats - Get System Network Statistics
3519 * @netdev: network interface device structure
3520 *
3521 * Returns the address of the device statistics structure.
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003522 * The statistics are actually updated from the watchdog.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 **/
3524
Joe Perches64798842008-07-11 15:17:02 -07003525static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526{
Jeff Kirsher6b7660c2006-01-12 16:50:35 -08003527 /* only return the current stats */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003528 return &netdev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529}
3530
3531/**
3532 * e1000_change_mtu - Change the Maximum Transfer Unit
3533 * @netdev: network interface device structure
3534 * @new_mtu: new value for maximum frame size
3535 *
3536 * Returns 0 on success, negative on failure
3537 **/
3538
Joe Perches64798842008-07-11 15:17:02 -07003539static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003541 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07003542 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3544
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003545 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3546 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003547 e_err(probe, "Invalid MTU setting\n");
Mallikarjuna R Chilakala868d5302005-10-04 06:58:59 -04003548 return -EINVAL;
3549 }
3550
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003551 /* Adapter-specific max frame size limits. */
Joe Perches1dc32912008-07-11 15:17:08 -07003552 switch (hw->mac_type) {
Auke Kok9e2feac2006-04-14 19:05:18 -07003553 case e1000_undefined ... e1000_82542_rev2_1:
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +00003554 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003555 e_err(probe, "Jumbo Frames not supported.\n");
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003556 return -EINVAL;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003557 }
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003558 break;
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003559 default:
3560 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3561 break;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003562 }
3563
Jesse Brandeburg3d6114e2009-09-25 12:19:02 +00003564 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3565 msleep(1);
3566 /* e1000_down has a dependency on max_frame_size */
3567 hw->max_frame_size = max_frame;
3568 if (netif_running(netdev))
3569 e1000_down(adapter);
3570
David S. Miller87f50322006-07-31 22:39:40 -07003571 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
Auke Kok9e2feac2006-04-14 19:05:18 -07003572 * means we reserve 2 more, this pushes us to allocate from the next
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003573 * larger slab size.
3574 * i.e. RXBUFFER_2048 --> size-4096 slab
3575 * however with the new *_jumbo_rx* routines, jumbo receives will use
3576 * fragmented skbs */
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003577
Jesse Brandeburg99261462010-01-22 22:56:16 +00003578 if (max_frame <= E1000_RXBUFFER_2048)
Auke Kok9e2feac2006-04-14 19:05:18 -07003579 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003580 else
3581#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
Auke Kok9e2feac2006-04-14 19:05:18 -07003582 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003583#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3584 adapter->rx_buffer_len = PAGE_SIZE;
3585#endif
Auke Kok9e2feac2006-04-14 19:05:18 -07003586
3587 /* adjust allocation if LPE protects us, and we aren't using SBP */
Joe Perches1dc32912008-07-11 15:17:08 -07003588 if (!hw->tbi_compatibility_on &&
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +00003589 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
Auke Kok9e2feac2006-04-14 19:05:18 -07003590 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3591 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003592
Emil Tantilov675ad472010-04-27 14:02:58 +00003593 pr_info("%s changing MTU from %d to %d\n",
3594 netdev->name, netdev->mtu, new_mtu);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003595 netdev->mtu = new_mtu;
3596
Auke Kok2db10a02006-06-27 09:06:28 -07003597 if (netif_running(netdev))
Jesse Brandeburg3d6114e2009-09-25 12:19:02 +00003598 e1000_up(adapter);
3599 else
3600 e1000_reset(adapter);
3601
3602 clear_bit(__E1000_RESETTING, &adapter->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 return 0;
3605}
3606
3607/**
3608 * e1000_update_stats - Update the board statistics counters
3609 * @adapter: board private structure
3610 **/
3611
Joe Perches64798842008-07-11 15:17:02 -07003612void e1000_update_stats(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613{
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003614 struct net_device *netdev = adapter->netdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 struct e1000_hw *hw = &adapter->hw;
Linas Vepstas282f33c2006-06-08 22:19:44 -07003616 struct pci_dev *pdev = adapter->pdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 unsigned long flags;
Joe Perches406874a2008-04-03 10:06:32 -07003618 u16 phy_tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619
3620#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3621
Linas Vepstas282f33c2006-06-08 22:19:44 -07003622 /*
3623 * Prevent stats update while adapter is being reset, or if the pci
3624 * connection is down.
3625 */
Auke Kok90267292006-06-08 09:30:24 -07003626 if (adapter->link_speed == 0)
3627 return;
Linas Vepstas81b19552006-12-12 18:29:15 -06003628 if (pci_channel_offline(pdev))
Linas Vepstas282f33c2006-06-08 22:19:44 -07003629 return;
Auke Kok90267292006-06-08 09:30:24 -07003630
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 spin_lock_irqsave(&adapter->stats_lock, flags);
3632
Masatake YAMATO828d0552007-10-20 03:06:37 +02003633 /* these counters are modified from e1000_tbi_adjust_stats,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 * called from the interrupt context, so they must only
3635 * be written while holding adapter->stats_lock
3636 */
3637
Joe Perches1dc32912008-07-11 15:17:08 -07003638 adapter->stats.crcerrs += er32(CRCERRS);
3639 adapter->stats.gprc += er32(GPRC);
3640 adapter->stats.gorcl += er32(GORCL);
3641 adapter->stats.gorch += er32(GORCH);
3642 adapter->stats.bprc += er32(BPRC);
3643 adapter->stats.mprc += er32(MPRC);
3644 adapter->stats.roc += er32(ROC);
Auke Kokcd94dd02006-06-27 09:08:22 -07003645
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003646 adapter->stats.prc64 += er32(PRC64);
3647 adapter->stats.prc127 += er32(PRC127);
3648 adapter->stats.prc255 += er32(PRC255);
3649 adapter->stats.prc511 += er32(PRC511);
3650 adapter->stats.prc1023 += er32(PRC1023);
3651 adapter->stats.prc1522 += er32(PRC1522);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652
Joe Perches1dc32912008-07-11 15:17:08 -07003653 adapter->stats.symerrs += er32(SYMERRS);
3654 adapter->stats.mpc += er32(MPC);
3655 adapter->stats.scc += er32(SCC);
3656 adapter->stats.ecol += er32(ECOL);
3657 adapter->stats.mcc += er32(MCC);
3658 adapter->stats.latecol += er32(LATECOL);
3659 adapter->stats.dc += er32(DC);
3660 adapter->stats.sec += er32(SEC);
3661 adapter->stats.rlec += er32(RLEC);
3662 adapter->stats.xonrxc += er32(XONRXC);
3663 adapter->stats.xontxc += er32(XONTXC);
3664 adapter->stats.xoffrxc += er32(XOFFRXC);
3665 adapter->stats.xofftxc += er32(XOFFTXC);
3666 adapter->stats.fcruc += er32(FCRUC);
3667 adapter->stats.gptc += er32(GPTC);
3668 adapter->stats.gotcl += er32(GOTCL);
3669 adapter->stats.gotch += er32(GOTCH);
3670 adapter->stats.rnbc += er32(RNBC);
3671 adapter->stats.ruc += er32(RUC);
3672 adapter->stats.rfc += er32(RFC);
3673 adapter->stats.rjc += er32(RJC);
3674 adapter->stats.torl += er32(TORL);
3675 adapter->stats.torh += er32(TORH);
3676 adapter->stats.totl += er32(TOTL);
3677 adapter->stats.toth += er32(TOTH);
3678 adapter->stats.tpr += er32(TPR);
Auke Kokcd94dd02006-06-27 09:08:22 -07003679
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003680 adapter->stats.ptc64 += er32(PTC64);
3681 adapter->stats.ptc127 += er32(PTC127);
3682 adapter->stats.ptc255 += er32(PTC255);
3683 adapter->stats.ptc511 += er32(PTC511);
3684 adapter->stats.ptc1023 += er32(PTC1023);
3685 adapter->stats.ptc1522 += er32(PTC1522);
Auke Kokcd94dd02006-06-27 09:08:22 -07003686
Joe Perches1dc32912008-07-11 15:17:08 -07003687 adapter->stats.mptc += er32(MPTC);
3688 adapter->stats.bptc += er32(BPTC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689
3690 /* used for adaptive IFS */
3691
Joe Perches1dc32912008-07-11 15:17:08 -07003692 hw->tx_packet_delta = er32(TPT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 adapter->stats.tpt += hw->tx_packet_delta;
Joe Perches1dc32912008-07-11 15:17:08 -07003694 hw->collision_delta = er32(COLC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695 adapter->stats.colc += hw->collision_delta;
3696
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003697 if (hw->mac_type >= e1000_82543) {
Joe Perches1dc32912008-07-11 15:17:08 -07003698 adapter->stats.algnerrc += er32(ALGNERRC);
3699 adapter->stats.rxerrc += er32(RXERRC);
3700 adapter->stats.tncrs += er32(TNCRS);
3701 adapter->stats.cexterr += er32(CEXTERR);
3702 adapter->stats.tsctc += er32(TSCTC);
3703 adapter->stats.tsctfc += er32(TSCTFC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704 }
3705
3706 /* Fill out the OS statistics structure */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003707 netdev->stats.multicast = adapter->stats.mprc;
3708 netdev->stats.collisions = adapter->stats.colc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709
3710 /* Rx Errors */
3711
Jeff Kirsher87041632006-03-02 18:21:24 -08003712 /* RLEC on some newer hardware can be incorrect so build
3713 * our own version based on RUC and ROC */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003714 netdev->stats.rx_errors = adapter->stats.rxerrc +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715 adapter->stats.crcerrs + adapter->stats.algnerrc +
Jeff Kirsher87041632006-03-02 18:21:24 -08003716 adapter->stats.ruc + adapter->stats.roc +
3717 adapter->stats.cexterr;
Mitch Williams49559852006-09-27 12:53:37 -07003718 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003719 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3720 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3721 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3722 netdev->stats.rx_missed_errors = adapter->stats.mpc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723
3724 /* Tx Errors */
Mitch Williams49559852006-09-27 12:53:37 -07003725 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003726 netdev->stats.tx_errors = adapter->stats.txerrc;
3727 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3728 netdev->stats.tx_window_errors = adapter->stats.latecol;
3729 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
Joe Perches1dc32912008-07-11 15:17:08 -07003730 if (hw->bad_tx_carr_stats_fd &&
Jeff Garzik167fb282006-12-15 10:41:15 -05003731 adapter->link_duplex == FULL_DUPLEX) {
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003732 netdev->stats.tx_carrier_errors = 0;
Jeff Garzik167fb282006-12-15 10:41:15 -05003733 adapter->stats.tncrs = 0;
3734 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735
3736 /* Tx Dropped needs to be maintained elsewhere */
3737
3738 /* Phy Stats */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003739 if (hw->media_type == e1000_media_type_copper) {
3740 if ((adapter->link_speed == SPEED_1000) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3742 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3743 adapter->phy_stats.idle_errors += phy_tmp;
3744 }
3745
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003746 if ((hw->mac_type <= e1000_82546) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 (hw->phy_type == e1000_phy_m88) &&
3748 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3749 adapter->phy_stats.receive_errors += phy_tmp;
3750 }
3751
Jeff Garzik15e376b2006-12-15 11:16:33 -05003752 /* Management Stats */
Joe Perches1dc32912008-07-11 15:17:08 -07003753 if (hw->has_smbus) {
3754 adapter->stats.mgptc += er32(MGTPTC);
3755 adapter->stats.mgprc += er32(MGTPRC);
3756 adapter->stats.mgpdc += er32(MGTPDC);
Jeff Garzik15e376b2006-12-15 11:16:33 -05003757 }
3758
Linus Torvalds1da177e2005-04-16 15:20:36 -07003759 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3760}
Jesse Brandeburg9ac98282006-11-01 08:48:10 -08003761
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Reads the Interrupt Cause Register (the read itself clears the
 * pending causes), kicks the watchdog task on link-state changes,
 * masks further interrupts, and defers the actual Rx/Tx processing
 * to the NAPI poll routine (e1000_clean).
 **/

static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* reading ICR also clears the interrupt causes in hardware */
	u32 icr = er32(ICR);

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	/* link status change / receive sequence error: let the watchdog
	 * re-check the link rather than handling it in interrupt context */
	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		/* reset the per-poll traffic counters that the clean
		 * routines accumulate (used later by e1000_set_itr) */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
3812
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813/**
3814 * e1000_clean - NAPI Rx polling callback
3815 * @adapter: board private structure
3816 **/
Joe Perches64798842008-07-11 15:17:02 -07003817static int e1000_clean(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003819 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003820 int tx_clean_complete = 0, work_done = 0;
Malli Chilakala26483452005-04-28 19:44:46 -07003821
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003822 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003823
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003824 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003825
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003826 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003827 work_done = budget;
3828
David S. Miller53e52c72008-01-07 21:06:12 -08003829 /* If budget not fully consumed, exit the polling mode */
3830 if (work_done < budget) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003831 if (likely(adapter->itr_setting & 3))
3832 e1000_set_itr(adapter);
Ben Hutchings288379f2009-01-19 16:43:59 -08003833 napi_complete(napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003834 if (!test_bit(__E1000_DOWN, &adapter->flags))
3835 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 }
3837
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003838 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839}
3840
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841/**
3842 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3843 * @adapter: board private structure
3844 **/
Joe Perches64798842008-07-11 15:17:02 -07003845static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3846 struct e1000_tx_ring *tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847{
Joe Perches1dc32912008-07-11 15:17:08 -07003848 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 struct net_device *netdev = adapter->netdev;
3850 struct e1000_tx_desc *tx_desc, *eop_desc;
3851 struct e1000_buffer *buffer_info;
3852 unsigned int i, eop;
Jeff Kirsher2a1af5d2006-03-02 18:20:43 -08003853 unsigned int count = 0;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003854 unsigned int total_tx_bytes=0, total_tx_packets=0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855
3856 i = tx_ring->next_to_clean;
3857 eop = tx_ring->buffer_info[i].next_to_watch;
3858 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3859
Alexander Duyckccfb3422009-03-25 21:59:04 +00003860 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3861 (count < tx_ring->count)) {
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003862 bool cleaned = false;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00003863 rmb(); /* read buffer_info after eop_desc */
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003864 for ( ; !cleaned; count++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865 tx_desc = E1000_TX_DESC(*tx_ring, i);
3866 buffer_info = &tx_ring->buffer_info[i];
3867 cleaned = (i == eop);
3868
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003869 if (cleaned) {
Dean Nelson31c15a22011-08-25 14:39:24 +00003870 total_tx_packets += buffer_info->segs;
3871 total_tx_bytes += buffer_info->bytecount;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003872 }
Jeff Kirsherfd803242005-12-13 00:06:22 -05003873 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08003874 tx_desc->upper.data = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003876 if (unlikely(++i == tx_ring->count)) i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003878
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879 eop = tx_ring->buffer_info[i].next_to_watch;
3880 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3881 }
3882
3883 tx_ring->next_to_clean = i;
3884
Auke Kok77b2aad2006-04-14 19:05:25 -07003885#define TX_WAKE_THRESHOLD 32
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003886 if (unlikely(count && netif_carrier_ok(netdev) &&
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003887 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3888 /* Make sure that anybody stopping the queue after this
3889 * sees the new next_to_clean.
3890 */
3891 smp_mb();
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003892
3893 if (netif_queue_stopped(netdev) &&
3894 !(test_bit(__E1000_DOWN, &adapter->flags))) {
Auke Kok77b2aad2006-04-14 19:05:25 -07003895 netif_wake_queue(netdev);
Jesse Brandeburgfcfb1222006-11-01 08:47:59 -08003896 ++adapter->restart_queue;
3897 }
Auke Kok77b2aad2006-04-14 19:05:25 -07003898 }
Malli Chilakala26483452005-04-28 19:44:46 -07003899
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003900 if (adapter->detect_tx_hung) {
Malli Chilakala26483452005-04-28 19:44:46 -07003901 /* Detect a transmit hang in hardware, this serializes the
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902 * check with the clearing of time_stamp and movement of i */
Joe Perchesc3033b02008-03-21 11:06:25 -07003903 adapter->detect_tx_hung = false;
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003904 if (tx_ring->buffer_info[eop].time_stamp &&
3905 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
Joe Perches8e95a202009-12-03 07:58:21 +00003906 (adapter->tx_timeout_factor * HZ)) &&
3907 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003908
3909 /* detected Tx unit hang */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003910 e_err(drv, "Detected Tx Unit Hang\n"
Emil Tantilov675ad472010-04-27 14:02:58 +00003911 " Tx Queue <%lu>\n"
3912 " TDH <%x>\n"
3913 " TDT <%x>\n"
3914 " next_to_use <%x>\n"
3915 " next_to_clean <%x>\n"
3916 "buffer_info[next_to_clean]\n"
3917 " time_stamp <%lx>\n"
3918 " next_to_watch <%x>\n"
3919 " jiffies <%lx>\n"
3920 " next_to_watch.status <%x>\n",
Jeff Kirsher7bfa4812006-01-12 16:50:41 -08003921 (unsigned long)((tx_ring - adapter->tx_ring) /
3922 sizeof(struct e1000_tx_ring)),
Joe Perches1dc32912008-07-11 15:17:08 -07003923 readl(hw->hw_addr + tx_ring->tdh),
3924 readl(hw->hw_addr + tx_ring->tdt),
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003925 tx_ring->next_to_use,
Jeff Kirsher392137f2006-01-12 16:50:57 -08003926 tx_ring->next_to_clean,
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003927 tx_ring->buffer_info[eop].time_stamp,
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003928 eop,
3929 jiffies,
3930 eop_desc->upper.fields.status);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003931 e1000_dump(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 netif_stop_queue(netdev);
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003933 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003935 adapter->total_tx_bytes += total_tx_bytes;
3936 adapter->total_tx_packets += total_tx_packets;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003937 netdev->stats.tx_bytes += total_tx_bytes;
3938 netdev->stats.tx_packets += total_tx_packets;
Eric Dumazet807540b2010-09-23 05:40:09 +00003939 return count < tx_ring->count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940}
3941
3942/**
3943 * e1000_rx_checksum - Receive Checksum Offload for 82543
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003944 * @adapter: board private structure
3945 * @status_err: receive descriptor status and error fields
3946 * @csum: receive descriptor csum field
3947 * @sk_buff: socket buffer with received data
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 **/
3949
Joe Perches64798842008-07-11 15:17:02 -07003950static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3951 u32 csum, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952{
Joe Perches1dc32912008-07-11 15:17:08 -07003953 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07003954 u16 status = (u16)status_err;
3955 u8 errors = (u8)(status_err >> 24);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003956
3957 skb_checksum_none_assert(skb);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003958
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959 /* 82543 or newer only */
Joe Perches1dc32912008-07-11 15:17:08 -07003960 if (unlikely(hw->mac_type < e1000_82543)) return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961 /* Ignore Checksum bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003962 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003963 /* TCP/UDP checksum error bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003964 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003965 /* let the stack verify checksum errors */
3966 adapter->hw_csum_err++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967 return;
3968 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003969 /* TCP/UDP Checksum has not been calculated */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003970 if (!(status & E1000_RXD_STAT_TCPCS))
3971 return;
3972
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003973 /* It must be a TCP or UDP packet with a valid checksum */
3974 if (likely(status & E1000_RXD_STAT_TCPCS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 /* TCP checksum is good */
3976 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003978 adapter->hw_csum_good++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979}
3980
3981/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003982 * e1000_consume_page - helper function
3983 **/
3984static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3985 u16 length)
3986{
3987 bi->page = NULL;
3988 skb->len += length;
3989 skb->data_len += length;
Eric Dumazeted64b3c2011-10-13 07:53:42 +00003990 skb->truesize += PAGE_SIZE;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003991}
3992
3993/**
3994 * e1000_receive_skb - helper function to handle rx indications
3995 * @adapter: board private structure
3996 * @status: descriptor status field as written by hardware
3997 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3998 * @skb: pointer to sk_buff to be indicated to stack
3999 */
4000static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4001 __le16 vlan, struct sk_buff *skb)
4002{
Jesse Brandeburg6a08d192010-09-22 18:23:05 +00004003 skb->protocol = eth_type_trans(skb, adapter->netdev);
4004
Jiri Pirko5622e402011-07-21 03:26:31 +00004005 if (status & E1000_RXD_STAT_VP) {
4006 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4007
4008 __vlan_hwaccel_put_tag(skb, vid);
4009 }
4010 napi_gro_receive(&adapter->napi, skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004011}
4012
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * Page-based receive path used for jumbo frames: a single packet may
 * span several descriptors, whose pages are chained onto one skb
 * (held in rx_ring->rx_skb_top) via skb_fill_page_desc until the
 * descriptor with the EOP bit arrives.
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* consume descriptors the hardware has marked done (DD set),
	 * stopping when the NAPI budget is exhausted */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* advance i and prefetch the next descriptor before
		 * processing the current one */
		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped;
			u8 last_byte;

			mapped = page_address(buffer_info->page);
			last_byte = *(mapped + length - 1);
			/* TBI workaround: some "errored" frames are actually
			 * acceptable; adjust stats and keep them */
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
						  irq_flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, mapped);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       irq_flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

/* shorthand for the partially assembled packet kept in the ring */
#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr, length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill every ring slot we freed */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4199
Joe Perches57bf6ee2010-05-13 15:26:17 +00004200/*
4201 * this should improve performance for small packets with large amounts
4202 * of reassembly being done in the stack
4203 */
4204static void e1000_check_copybreak(struct net_device *netdev,
4205 struct e1000_buffer *buffer_info,
4206 u32 length, struct sk_buff **skb)
4207{
4208 struct sk_buff *new_skb;
4209
4210 if (length > copybreak)
4211 return;
4212
4213 new_skb = netdev_alloc_skb_ip_align(netdev, length);
4214 if (!new_skb)
4215 return;
4216
4217 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4218 (*skb)->data - NET_IP_ALIGN,
4219 length + NET_IP_ALIGN);
4220 /* save the skb in buffer_info as good */
4221 buffer_info->skb = *skb;
4222 *skb = new_skb;
4223}
4224
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * Single-buffer receive path: every packet must fit one descriptor's
 * buffer; multi-descriptor packets are discarded.  Returns true when
 * at least one descriptor was cleaned.
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* consume descriptors the hardware has marked done (DD set),
	 * stopping when the NAPI budget is exhausted */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		/* advance i and prefetch the next descriptor before
		 * processing the current one */
		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if thats the case we need to toss it. In fact, we
		 * to toss every packet with the EOP bit clear and the next
		 * frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			/* the EOP fragment ends the discard run */
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			/* TBI workaround: some "errored" frames are actually
			 * acceptable; adjust stats and keep them */
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		/* copy small packets into a fresh skb so the full-size
		 * buffer can be recycled */
		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill every ring slot we freed */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4365
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 *
 * Refills up to @cleaned_count ring slots with an skb plus a
 * DMA-mapped page each, reusing any skb/page/mapping a slot still
 * holds, then advances the ring tail so the hardware can use the
 * new descriptors.  Stops early (counting alloc_rx_buff_failed) on
 * any allocation or mapping failure.
 **/

static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse a recycled skb if the slot still has one */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* map the page for device DMA unless a mapping survived
		 * from a recycled buffer */
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				/* undo everything for this slot and give up */
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* the tail register points at the last valid descriptor,
		 * one behind next_to_use */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
4451
4452/**
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004453 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 * @adapter: address of board private structure
4455 **/
4456
Joe Perches64798842008-07-11 15:17:02 -07004457static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4458 struct e1000_rx_ring *rx_ring,
4459 int cleaned_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460{
Joe Perches1dc32912008-07-11 15:17:08 -07004461 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 struct net_device *netdev = adapter->netdev;
4463 struct pci_dev *pdev = adapter->pdev;
4464 struct e1000_rx_desc *rx_desc;
4465 struct e1000_buffer *buffer_info;
4466 struct sk_buff *skb;
Malli Chilakala26483452005-04-28 19:44:46 -07004467 unsigned int i;
Eric Dumazet89d71a62009-10-13 05:34:20 +00004468 unsigned int bufsz = adapter->rx_buffer_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469
4470 i = rx_ring->next_to_use;
4471 buffer_info = &rx_ring->buffer_info[i];
4472
Jeff Kirshera292ca62006-01-12 16:51:30 -08004473 while (cleaned_count--) {
Christoph Hellwigca6f7222006-08-31 14:27:47 -07004474 skb = buffer_info->skb;
4475 if (skb) {
Jeff Kirshera292ca62006-01-12 16:51:30 -08004476 skb_trim(skb, 0);
4477 goto map_skb;
4478 }
4479
Eric Dumazet89d71a62009-10-13 05:34:20 +00004480 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004481 if (unlikely(!skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482 /* Better luck next round */
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004483 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484 break;
4485 }
4486
Malli Chilakala26483452005-04-28 19:44:46 -07004487 /* Fix for errata 23, can't cross 64kB boundary */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4489 struct sk_buff *oldskb = skb;
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004490 e_err(rx_err, "skb align check failed: %u bytes at "
4491 "%p\n", bufsz, skb->data);
Malli Chilakala26483452005-04-28 19:44:46 -07004492 /* Try again, without freeing the previous */
Eric Dumazet89d71a62009-10-13 05:34:20 +00004493 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Malli Chilakala26483452005-04-28 19:44:46 -07004494 /* Failed allocation, critical failure */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 if (!skb) {
4496 dev_kfree_skb(oldskb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004497 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 break;
4499 }
Malli Chilakala26483452005-04-28 19:44:46 -07004500
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4502 /* give up */
4503 dev_kfree_skb(skb);
4504 dev_kfree_skb(oldskb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004505 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 break; /* while !buffer_info->skb */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507 }
Christoph Hellwigca6f7222006-08-31 14:27:47 -07004508
4509 /* Use new allocation */
4510 dev_kfree_skb(oldskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512 buffer_info->skb = skb;
4513 buffer_info->length = adapter->rx_buffer_len;
Jeff Kirshera292ca62006-01-12 16:51:30 -08004514map_skb:
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004515 buffer_info->dma = dma_map_single(&pdev->dev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516 skb->data,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004517 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004518 DMA_FROM_DEVICE);
4519 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
Anton Blanchardb5abb022010-02-19 17:54:53 +00004520 dev_kfree_skb(skb);
4521 buffer_info->skb = NULL;
4522 buffer_info->dma = 0;
4523 adapter->alloc_rx_buff_failed++;
4524 break; /* while !buffer_info->skb */
4525 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004527 /*
4528 * XXX if it was allocated cleanly it will never map to a
4529 * boundary crossing
4530 */
4531
Malli Chilakala26483452005-04-28 19:44:46 -07004532 /* Fix for errata 23, can't cross 64kB boundary */
4533 if (!e1000_check_64k_bound(adapter,
4534 (void *)(unsigned long)buffer_info->dma,
4535 adapter->rx_buffer_len)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004536 e_err(rx_err, "dma align check failed: %u bytes at "
4537 "%p\n", adapter->rx_buffer_len,
Emil Tantilov675ad472010-04-27 14:02:58 +00004538 (void *)(unsigned long)buffer_info->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539 dev_kfree_skb(skb);
4540 buffer_info->skb = NULL;
4541
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004542 dma_unmap_single(&pdev->dev, buffer_info->dma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543 adapter->rx_buffer_len,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004544 DMA_FROM_DEVICE);
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00004545 buffer_info->dma = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004547 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548 break; /* while !buffer_info->skb */
4549 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004550 rx_desc = E1000_RX_DESC(*rx_ring, i);
4551 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4552
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004553 if (unlikely(++i == rx_ring->count))
4554 i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555 buffer_info = &rx_ring->buffer_info[i];
4556 }
4557
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004558 if (likely(rx_ring->next_to_use != i)) {
4559 rx_ring->next_to_use = i;
4560 if (unlikely(i-- == 0))
4561 i = (rx_ring->count - 1);
4562
4563 /* Force memory writes to complete before letting h/w
4564 * know there are new descriptors to fetch. (Only
4565 * applicable for weak-ordered memory model archs,
4566 * such as IA-64). */
4567 wmb();
Joe Perches1dc32912008-07-11 15:17:08 -07004568 writel(i, hw->hw_addr + rx_ring->rdt);
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004569 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570}
4571
4572/**
4573 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4574 * @adapter:
4575 **/
4576
Joe Perches64798842008-07-11 15:17:02 -07004577static void e1000_smartspeed(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578{
Joe Perches1dc32912008-07-11 15:17:08 -07004579 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004580 u16 phy_status;
4581 u16 phy_ctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582
Joe Perches1dc32912008-07-11 15:17:08 -07004583 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4584 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585 return;
4586
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004587 if (adapter->smartspeed == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588 /* If Master/Slave config fault is asserted twice,
4589 * we assume back-to-back */
Joe Perches1dc32912008-07-11 15:17:08 -07004590 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004591 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
Joe Perches1dc32912008-07-11 15:17:08 -07004592 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004593 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
Joe Perches1dc32912008-07-11 15:17:08 -07004594 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004595 if (phy_ctrl & CR_1000T_MS_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 phy_ctrl &= ~CR_1000T_MS_ENABLE;
Joe Perches1dc32912008-07-11 15:17:08 -07004597 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598 phy_ctrl);
4599 adapter->smartspeed++;
Joe Perches1dc32912008-07-11 15:17:08 -07004600 if (!e1000_phy_setup_autoneg(hw) &&
4601 !e1000_read_phy_reg(hw, PHY_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602 &phy_ctrl)) {
4603 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4604 MII_CR_RESTART_AUTO_NEG);
Joe Perches1dc32912008-07-11 15:17:08 -07004605 e1000_write_phy_reg(hw, PHY_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 phy_ctrl);
4607 }
4608 }
4609 return;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004610 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611 /* If still no link, perhaps using 2/3 pair cable */
Joe Perches1dc32912008-07-11 15:17:08 -07004612 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613 phy_ctrl |= CR_1000T_MS_ENABLE;
Joe Perches1dc32912008-07-11 15:17:08 -07004614 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4615 if (!e1000_phy_setup_autoneg(hw) &&
4616 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4618 MII_CR_RESTART_AUTO_NEG);
Joe Perches1dc32912008-07-11 15:17:08 -07004619 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620 }
4621 }
4622 /* Restart process after E1000_SMARTSPEED_MAX iterations */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004623 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624 adapter->smartspeed = 0;
4625}
4626
4627/**
4628 * e1000_ioctl -
4629 * @netdev:
4630 * @ifreq:
4631 * @cmd:
4632 **/
4633
Joe Perches64798842008-07-11 15:17:02 -07004634static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635{
4636 switch (cmd) {
4637 case SIOCGMIIPHY:
4638 case SIOCGMIIREG:
4639 case SIOCSMIIREG:
4640 return e1000_mii_ioctl(netdev, ifr, cmd);
4641 default:
4642 return -EOPNOTSUPP;
4643 }
4644}
4645
4646/**
4647 * e1000_mii_ioctl -
4648 * @netdev:
4649 * @ifreq:
4650 * @cmd:
4651 **/
4652
Joe Perches64798842008-07-11 15:17:02 -07004653static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4654 int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004656 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004657 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 struct mii_ioctl_data *data = if_mii(ifr);
4659 int retval;
Joe Perches406874a2008-04-03 10:06:32 -07004660 u16 mii_reg;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004661 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662
Joe Perches1dc32912008-07-11 15:17:08 -07004663 if (hw->media_type != e1000_media_type_copper)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 return -EOPNOTSUPP;
4665
4666 switch (cmd) {
4667 case SIOCGMIIPHY:
Joe Perches1dc32912008-07-11 15:17:08 -07004668 data->phy_id = hw->phy_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 break;
4670 case SIOCGMIIREG:
Malli Chilakala97876fc2005-06-17 17:40:19 -07004671 spin_lock_irqsave(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004672 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
Malli Chilakala97876fc2005-06-17 17:40:19 -07004673 &data->val_out)) {
4674 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675 return -EIO;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004676 }
4677 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004678 break;
4679 case SIOCSMIIREG:
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004680 if (data->reg_num & ~(0x1F))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004681 return -EFAULT;
4682 mii_reg = data->val_in;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004683 spin_lock_irqsave(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004684 if (e1000_write_phy_reg(hw, data->reg_num,
Malli Chilakala97876fc2005-06-17 17:40:19 -07004685 mii_reg)) {
4686 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004687 return -EIO;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004688 }
Jesse Brandeburgf0163ac2007-11-13 21:00:09 -08004689 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004690 if (hw->media_type == e1000_media_type_copper) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004691 switch (data->reg_num) {
4692 case PHY_CTRL:
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004693 if (mii_reg & MII_CR_POWER_DOWN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 break;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004695 if (mii_reg & MII_CR_AUTO_NEG_EN) {
Joe Perches1dc32912008-07-11 15:17:08 -07004696 hw->autoneg = 1;
4697 hw->autoneg_advertised = 0x2F;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698 } else {
David Decotigny14ad2512011-04-27 18:32:43 +00004699 u32 speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700 if (mii_reg & 0x40)
David Decotigny14ad2512011-04-27 18:32:43 +00004701 speed = SPEED_1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702 else if (mii_reg & 0x2000)
David Decotigny14ad2512011-04-27 18:32:43 +00004703 speed = SPEED_100;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004704 else
David Decotigny14ad2512011-04-27 18:32:43 +00004705 speed = SPEED_10;
4706 retval = e1000_set_spd_dplx(
4707 adapter, speed,
4708 ((mii_reg & 0x100)
4709 ? DUPLEX_FULL :
4710 DUPLEX_HALF));
Jesse Brandeburgf0163ac2007-11-13 21:00:09 -08004711 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712 return retval;
4713 }
Auke Kok2db10a02006-06-27 09:06:28 -07004714 if (netif_running(adapter->netdev))
4715 e1000_reinit_locked(adapter);
4716 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 e1000_reset(adapter);
4718 break;
4719 case M88E1000_PHY_SPEC_CTRL:
4720 case M88E1000_EXT_PHY_SPEC_CTRL:
Joe Perches1dc32912008-07-11 15:17:08 -07004721 if (e1000_phy_reset(hw))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722 return -EIO;
4723 break;
4724 }
4725 } else {
4726 switch (data->reg_num) {
4727 case PHY_CTRL:
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004728 if (mii_reg & MII_CR_POWER_DOWN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729 break;
Auke Kok2db10a02006-06-27 09:06:28 -07004730 if (netif_running(adapter->netdev))
4731 e1000_reinit_locked(adapter);
4732 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733 e1000_reset(adapter);
4734 break;
4735 }
4736 }
4737 break;
4738 default:
4739 return -EOPNOTSUPP;
4740 }
4741 return E1000_SUCCESS;
4742}
4743
Joe Perches64798842008-07-11 15:17:02 -07004744void e1000_pci_set_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745{
4746 struct e1000_adapter *adapter = hw->back;
Malli Chilakala26483452005-04-28 19:44:46 -07004747 int ret_val = pci_set_mwi(adapter->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004749 if (ret_val)
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004750 e_err(probe, "Error in setting MWI\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751}
4752
Joe Perches64798842008-07-11 15:17:02 -07004753void e1000_pci_clear_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754{
4755 struct e1000_adapter *adapter = hw->back;
4756
4757 pci_clear_mwi(adapter->pdev);
4758}
4759
Joe Perches64798842008-07-11 15:17:02 -07004760int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
Peter Oruba007755e2007-09-28 22:42:06 -07004761{
4762 struct e1000_adapter *adapter = hw->back;
4763 return pcix_get_mmrbc(adapter->pdev);
4764}
4765
Joe Perches64798842008-07-11 15:17:02 -07004766void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
Peter Oruba007755e2007-09-28 22:42:06 -07004767{
4768 struct e1000_adapter *adapter = hw->back;
4769 pcix_set_mmrbc(adapter->pdev, mmrbc);
4770}
4771
Joe Perches64798842008-07-11 15:17:02 -07004772void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773{
4774 outl(value, port);
4775}
4776
Jiri Pirko5622e402011-07-21 03:26:31 +00004777static bool e1000_vlan_used(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778{
Jiri Pirko5622e402011-07-21 03:26:31 +00004779 u16 vid;
4780
4781 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4782 return true;
4783 return false;
4784}
4785
Jiri Pirko52f55092012-03-20 18:10:01 +00004786static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4787 netdev_features_t features)
4788{
4789 struct e1000_hw *hw = &adapter->hw;
4790 u32 ctrl;
4791
4792 ctrl = er32(CTRL);
4793 if (features & NETIF_F_HW_VLAN_RX) {
4794 /* enable VLAN tag insert/strip */
4795 ctrl |= E1000_CTRL_VME;
4796 } else {
4797 /* disable VLAN tag insert/strip */
4798 ctrl &= ~E1000_CTRL_VME;
4799 }
4800 ew32(CTRL, ctrl);
4801}
Jiri Pirko5622e402011-07-21 03:26:31 +00004802static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4803 bool filter_on)
4804{
Joe Perches1dc32912008-07-11 15:17:08 -07004805 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko5622e402011-07-21 03:26:31 +00004806 u32 rctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004808 if (!test_bit(__E1000_DOWN, &adapter->flags))
4809 e1000_irq_disable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004810
Jiri Pirko52f55092012-03-20 18:10:01 +00004811 __e1000_vlan_mode(adapter, adapter->netdev->features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004812 if (filter_on) {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004813 /* enable VLAN receive filtering */
4814 rctl = er32(RCTL);
4815 rctl &= ~E1000_RCTL_CFIEN;
Jiri Pirko5622e402011-07-21 03:26:31 +00004816 if (!(adapter->netdev->flags & IFF_PROMISC))
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004817 rctl |= E1000_RCTL_VFE;
4818 ew32(RCTL, rctl);
4819 e1000_update_mng_vlan(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004820 } else {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004821 /* disable VLAN receive filtering */
4822 rctl = er32(RCTL);
4823 rctl &= ~E1000_RCTL_VFE;
4824 ew32(RCTL, rctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004825 }
4826
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004827 if (!test_bit(__E1000_DOWN, &adapter->flags))
4828 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829}
4830
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004831static void e1000_vlan_mode(struct net_device *netdev,
Jiri Pirko52f55092012-03-20 18:10:01 +00004832 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +00004833{
4834 struct e1000_adapter *adapter = netdev_priv(netdev);
Jiri Pirko5622e402011-07-21 03:26:31 +00004835
4836 if (!test_bit(__E1000_DOWN, &adapter->flags))
4837 e1000_irq_disable(adapter);
4838
Jiri Pirko52f55092012-03-20 18:10:01 +00004839 __e1000_vlan_mode(adapter, features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004840
4841 if (!test_bit(__E1000_DOWN, &adapter->flags))
4842 e1000_irq_enable(adapter);
4843}
4844
Jiri Pirko8e586132011-12-08 19:52:37 -05004845static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004846{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004847 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004848 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004849 u32 vfta, index;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004850
Joe Perches1dc32912008-07-11 15:17:08 -07004851 if ((hw->mng_cookie.status &
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004852 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4853 (vid == adapter->mng_vlan_id))
Jiri Pirko8e586132011-12-08 19:52:37 -05004854 return 0;
Jiri Pirko5622e402011-07-21 03:26:31 +00004855
4856 if (!e1000_vlan_used(adapter))
4857 e1000_vlan_filter_on_off(adapter, true);
4858
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859 /* add VID to filter table */
4860 index = (vid >> 5) & 0x7F;
Joe Perches1dc32912008-07-11 15:17:08 -07004861 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862 vfta |= (1 << (vid & 0x1F));
Joe Perches1dc32912008-07-11 15:17:08 -07004863 e1000_write_vfta(hw, index, vfta);
Jiri Pirko5622e402011-07-21 03:26:31 +00004864
4865 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05004866
4867 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004868}
4869
Jiri Pirko8e586132011-12-08 19:52:37 -05004870static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004872 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004873 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004874 u32 vfta, index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004876 if (!test_bit(__E1000_DOWN, &adapter->flags))
4877 e1000_irq_disable(adapter);
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004878 if (!test_bit(__E1000_DOWN, &adapter->flags))
4879 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880
4881 /* remove VID from filter table */
4882 index = (vid >> 5) & 0x7F;
Joe Perches1dc32912008-07-11 15:17:08 -07004883 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 vfta &= ~(1 << (vid & 0x1F));
Joe Perches1dc32912008-07-11 15:17:08 -07004885 e1000_write_vfta(hw, index, vfta);
Jiri Pirko5622e402011-07-21 03:26:31 +00004886
4887 clear_bit(vid, adapter->active_vlans);
4888
4889 if (!e1000_vlan_used(adapter))
4890 e1000_vlan_filter_on_off(adapter, false);
Jiri Pirko8e586132011-12-08 19:52:37 -05004891
4892 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893}
4894
Joe Perches64798842008-07-11 15:17:02 -07004895static void e1000_restore_vlan(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896{
Jiri Pirko5622e402011-07-21 03:26:31 +00004897 u16 vid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898
Jiri Pirko5622e402011-07-21 03:26:31 +00004899 if (!e1000_vlan_used(adapter))
4900 return;
4901
4902 e1000_vlan_filter_on_off(adapter, true);
4903 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4904 e1000_vlan_rx_add_vid(adapter->netdev, vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905}
4906
David Decotigny14ad2512011-04-27 18:32:43 +00004907int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908{
Joe Perches1dc32912008-07-11 15:17:08 -07004909 struct e1000_hw *hw = &adapter->hw;
4910
4911 hw->autoneg = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004912
David Decotigny14ad2512011-04-27 18:32:43 +00004913 /* Make sure dplx is at most 1 bit and lsb of speed is not set
4914 * for the switch() below to work */
4915 if ((spd & 1) || (dplx & ~1))
4916 goto err_inval;
4917
Malli Chilakala69213682005-06-17 17:44:20 -07004918 /* Fiber NICs only allow 1000 gbps Full duplex */
Joe Perches1dc32912008-07-11 15:17:08 -07004919 if ((hw->media_type == e1000_media_type_fiber) &&
David Decotigny14ad2512011-04-27 18:32:43 +00004920 spd != SPEED_1000 &&
4921 dplx != DUPLEX_FULL)
4922 goto err_inval;
Malli Chilakala69213682005-06-17 17:44:20 -07004923
David Decotigny14ad2512011-04-27 18:32:43 +00004924 switch (spd + dplx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925 case SPEED_10 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004926 hw->forced_speed_duplex = e1000_10_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 break;
4928 case SPEED_10 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004929 hw->forced_speed_duplex = e1000_10_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930 break;
4931 case SPEED_100 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004932 hw->forced_speed_duplex = e1000_100_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 break;
4934 case SPEED_100 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004935 hw->forced_speed_duplex = e1000_100_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 break;
4937 case SPEED_1000 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004938 hw->autoneg = 1;
4939 hw->autoneg_advertised = ADVERTISE_1000_FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940 break;
4941 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4942 default:
David Decotigny14ad2512011-04-27 18:32:43 +00004943 goto err_inval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944 }
4945 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00004946
4947err_inval:
4948 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4949 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004950}
4951
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00004952static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953{
4954 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07004955 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004956 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004957 u32 ctrl, ctrl_ext, rctl, status;
4958 u32 wufc = adapter->wol;
Auke Kok6fdfef12006-06-27 09:06:36 -07004959#ifdef CONFIG_PM
Jeff Kirsher240b1712006-01-12 16:51:28 -08004960 int retval = 0;
Auke Kok6fdfef12006-06-27 09:06:36 -07004961#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962
4963 netif_device_detach(netdev);
4964
Auke Kok2db10a02006-06-27 09:06:28 -07004965 if (netif_running(netdev)) {
4966 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967 e1000_down(adapter);
Auke Kok2db10a02006-06-27 09:06:28 -07004968 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969
Jesse Brandeburg2f826652006-01-18 13:01:34 -08004970#ifdef CONFIG_PM
Kok, Auke1d33e9c2007-02-16 14:39:28 -08004971 retval = pci_save_state(pdev);
Jesse Brandeburg3a3847e2012-01-04 20:23:33 +00004972 if (retval)
Jesse Brandeburg2f826652006-01-18 13:01:34 -08004973 return retval;
4974#endif
4975
Joe Perches1dc32912008-07-11 15:17:08 -07004976 status = er32(STATUS);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004977 if (status & E1000_STATUS_LU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978 wufc &= ~E1000_WUFC_LNKC;
4979
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004980 if (wufc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981 e1000_setup_rctl(adapter);
Patrick McHardydb0ce502007-11-13 20:54:59 -08004982 e1000_set_rx_mode(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983
Dean Nelsonb8681792012-01-19 17:47:24 +00004984 rctl = er32(RCTL);
4985
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986 /* turn on all-multi mode if wake on multicast is enabled */
Dean Nelsonb8681792012-01-19 17:47:24 +00004987 if (wufc & E1000_WUFC_MC)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988 rctl |= E1000_RCTL_MPE;
Dean Nelsonb8681792012-01-19 17:47:24 +00004989
4990 /* enable receives in the hardware */
4991 ew32(RCTL, rctl | E1000_RCTL_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992
Joe Perches1dc32912008-07-11 15:17:08 -07004993 if (hw->mac_type >= e1000_82540) {
4994 ctrl = er32(CTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995 /* advertise wake from D3Cold */
4996 #define E1000_CTRL_ADVD3WUC 0x00100000
4997 /* phy power management enable */
4998 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4999 ctrl |= E1000_CTRL_ADVD3WUC |
5000 E1000_CTRL_EN_PHY_PWR_MGMT;
Joe Perches1dc32912008-07-11 15:17:08 -07005001 ew32(CTRL, ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002 }
5003
Joe Perches1dc32912008-07-11 15:17:08 -07005004 if (hw->media_type == e1000_media_type_fiber ||
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00005005 hw->media_type == e1000_media_type_internal_serdes) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006 /* keep the laser running in D3 */
Joe Perches1dc32912008-07-11 15:17:08 -07005007 ctrl_ext = er32(CTRL_EXT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
Joe Perches1dc32912008-07-11 15:17:08 -07005009 ew32(CTRL_EXT, ctrl_ext);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 }
5011
Joe Perches1dc32912008-07-11 15:17:08 -07005012 ew32(WUC, E1000_WUC_PME_EN);
5013 ew32(WUFC, wufc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014 } else {
Joe Perches1dc32912008-07-11 15:17:08 -07005015 ew32(WUC, 0);
5016 ew32(WUFC, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017 }
5018
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005019 e1000_release_manageability(adapter);
5020
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005021 *enable_wake = !!wufc;
5022
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005023 /* make sure adapter isn't asleep if manageability is enabled */
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005024 if (adapter->en_mng_pt)
5025 *enable_wake = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026
Auke Kokedd106f2006-11-06 08:57:12 -08005027 if (netif_running(netdev))
5028 e1000_free_irq(adapter);
5029
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030 pci_disable_device(pdev);
Jeff Kirsher240b1712006-01-12 16:51:28 -08005031
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 return 0;
5033}
5034
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005035#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005036static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5037{
5038 int retval;
5039 bool wake;
5040
5041 retval = __e1000_shutdown(pdev, &wake);
5042 if (retval)
5043 return retval;
5044
5045 if (wake) {
5046 pci_prepare_to_sleep(pdev);
5047 } else {
5048 pci_wake_from_d3(pdev, false);
5049 pci_set_power_state(pdev, PCI_D3hot);
5050 }
5051
5052 return 0;
5053}
5054
Joe Perches64798842008-07-11 15:17:02 -07005055static int e1000_resume(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056{
5057 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07005058 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005059 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07005060 u32 err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005061
Auke Kokd0e027d2006-04-14 19:04:40 -07005062 pci_set_power_state(pdev, PCI_D0);
Kok, Auke1d33e9c2007-02-16 14:39:28 -08005063 pci_restore_state(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +00005064 pci_save_state(pdev);
Taku Izumi81250292008-07-11 15:17:44 -07005065
5066 if (adapter->need_ioport)
5067 err = pci_enable_device(pdev);
5068 else
5069 err = pci_enable_device_mem(pdev);
Joe Perchesc7be73b2008-07-11 15:17:28 -07005070 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005071 pr_err("Cannot enable PCI device from suspend\n");
Auke Kok3d1dd8c2006-08-28 14:56:27 -07005072 return err;
5073 }
Malli Chilakalaa4cb8472005-04-28 19:41:28 -07005074 pci_set_master(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075
Auke Kokd0e027d2006-04-14 19:04:40 -07005076 pci_enable_wake(pdev, PCI_D3hot, 0);
5077 pci_enable_wake(pdev, PCI_D3cold, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078
Joe Perchesc7be73b2008-07-11 15:17:28 -07005079 if (netif_running(netdev)) {
5080 err = e1000_request_irq(adapter);
5081 if (err)
5082 return err;
5083 }
Auke Kokedd106f2006-11-06 08:57:12 -08005084
5085 e1000_power_up_phy(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005087 ew32(WUS, ~0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005089 e1000_init_manageability(adapter);
5090
Jesse Brandeburg96838a42006-01-18 13:01:39 -08005091 if (netif_running(netdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005092 e1000_up(adapter);
5093
5094 netif_device_attach(netdev);
5095
Linus Torvalds1da177e2005-04-16 15:20:36 -07005096 return 0;
5097}
5098#endif
Auke Kokc653e632006-05-23 13:35:57 -07005099
5100static void e1000_shutdown(struct pci_dev *pdev)
5101{
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005102 bool wake;
5103
5104 __e1000_shutdown(pdev, &wake);
5105
5106 if (system_state == SYSTEM_POWER_OFF) {
5107 pci_wake_from_d3(pdev, wake);
5108 pci_set_power_state(pdev, PCI_D3hot);
5109 }
Auke Kokc653e632006-05-23 13:35:57 -07005110}
5111
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112#ifdef CONFIG_NET_POLL_CONTROLLER
5113/*
5114 * Polling 'interrupt' - used by things like netconsole to send skbs
5115 * without having to re-enable interrupts. It's not called while
5116 * the interrupt routine is executing.
5117 */
Joe Perches64798842008-07-11 15:17:02 -07005118static void e1000_netpoll(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005120 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kokd3d9e482006-07-14 16:14:23 -07005121
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122 disable_irq(adapter->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005123 e1000_intr(adapter->pdev->irq, netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124 enable_irq(adapter->pdev->irq);
5125}
5126#endif
5127
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It detaches the netdev, stops the
 * interface if it was running, and asks the PCI core for a slot reset
 * (unless the failure is permanent, in which case we disconnect).
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Keep the stack from using the device while recovery runs. */
	netif_device_detach(netdev);

	/* Link is dead for good: no point in requesting a reset. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
5154
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* I/O-port BARs are only needed on some older parts. */
	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* We are awake; make sure wake-up from deep states is disarmed. */
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	/* Clear all Wake-Up Status bits left over from before the reset. */
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
5187
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Re-establish the manageability/firmware handoff after reset. */
	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	/* Let the networking stack use the device again. */
	netif_device_attach(netdev);
}
5212
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213/* e1000_main.c */